info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
} else {
if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
return NULL;
dst = c->cfg.dst_addr;
if (c->is_40bit_channel)
dst |= 0x400000000ull;
src = buf_addr;
info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
/* non-lite channels can write zeroes w/o accessing memory */
if (buf_addr == od->zero_page && !c->is_lite_channel)
			info |= BCM2835_DMA_S_IGNORE;
	}

/* calculate number of frames */
frames = /* number of periods */
DIV_ROUND_UP(buf_len, period_len) *
/* number of frames per period */
bcm2835_dma_frames_for_length(period_len, max_len);
	/*
	 * allocate the CB chain
	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
	 * implementation calls prep_dma_cyclic with interrupts disabled.
	 */
d = bcm2835_dma_create_cb_chain(c, direction, true,
info, extra,
frames, src, dst, buf_len,
period_len, GFP_NOWAIT);
if (!d)
return NULL;
/* wrap around into a loop */
if (c->is_40bit_channel)
((struct bcm2711_dma40_scb *)
d->cb_list[frames - 1].cb)->next_cb =
to_bcm2711_cbaddr(d->cb_list[0].paddr);
else
d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
return vchan_tx_prep(&c->vc, &d->vd, flags);
}
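/*
 * dmaengine device_config hook: just cache the client's slave configuration;
 * the prep callbacks above validate it (e.g. the 4-byte bus-width check).
 */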
static int bcm2835_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

c->cfg = *cfg;
return 0;
}
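/*
 * Abort the in-flight transfer (if any) and free every descriptor still
 * queued on the virtual channel.
 */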
static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* stop DMA activity */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c);
	}
vchan_get_all_descriptors(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
return 0;
}
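/* Wait for any descriptor callback still running after terminate_all */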
static void bcm2835_dma_synchronize(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
vchan_synchronize(&c->vc);
}
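/*
 * Set up one channel: register block, IRQ, and whether it is a 40-bit
 * (BCM2711) or "lite" channel, which restricts what the prep code may do.
 */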
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
int irq, unsigned int irq_flags)
{
struct bcm2835_chan *c;
c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
c->vc.desc_free = bcm2835_dma_desc_free;
vchan_init(&c->vc, &d->ddev);
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
c->irq_number = irq;
c->irq_flags = irq_flags;
/* check for 40bit and lite channels */
if (d->cfg_data->chan_40bit_mask & BIT(chan_id))
c->is_40bit_channel = true;
else if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
BCM2835_DMA_DEBUG_LITE)
c->is_lite_channel = true;
return 0;
}
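/* Tear down every channel and release the shared zero-page mapping */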
static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
struct bcm2835_chan *c, *next;
list_for_each_entry_safe(c, next, &od->ddev.channels,
vc.chan.device_node) {
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
}
dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
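/*
 * Let users of the 40-bit memcpy API check that probe has reserved and set
 * up the dedicated channel; -EPROBE_DEFER tells callers to retry later.
 */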
int bcm2711_dma40_memcpy_init(void)
{
if (!memcpy_parent)
return -EPROBE_DEFER;
if (!memcpy_chan)
return -EINVAL;
if (!memcpy_scb)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(bcm2711_dma40_memcpy_init);
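/*
 * Synchronous, polled copy on the reserved 40-bit channel. The spinlock is
 * held with interrupts off for the whole transfer, so keep copies short.
 * Minimal usage sketch (dst_dma/src_dma are hypothetical, pre-mapped bus
 * addresses, not part of this driver):
 *
 *	if (!bcm2711_dma40_memcpy_init())
 *		bcm2711_dma40_memcpy(dst_dma, src_dma, PAGE_SIZE);
 */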
void bcm2711_dma40_memcpy(dma_addr_t dst, dma_addr_t src, size_t size)
{
struct bcm2711_dma40_scb *scb = memcpy_scb;
unsigned long flags;
if (!scb) {
pr_err("bcm2711_dma40_memcpy not initialised!\n");
return;
}
spin_lock_irqsave(&memcpy_lock, flags);
scb->ti = 0;
scb->src = lower_32_bits(src);
scb->srci = upper_32_bits(src) | BCM2711_DMA40_MEMCPY_XFER_INFO;
scb->dst = lower_32_bits(dst);
scb->dsti = upper_32_bits(dst) | BCM2711_DMA40_MEMCPY_XFER_INFO;
scb->len = size;
scb->next_cb = 0;
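	/* the DMA40 CB register holds bits [39:5] of the 32-byte-aligned SCB address */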
writel((u32)(memcpy_scb_dma >> 5), memcpy_chan + BCM2711_DMA40_CB);
writel(BCM2711_DMA40_MEMCPY_FLAGS + BCM2711_DMA40_ACTIVE,
memcpy_chan + BCM2711_DMA40_CS);
/* Poll for completion */
while (!(readl(memcpy_chan + BCM2711_DMA40_CS) & BCM2711_DMA40_END))
cpu_relax();
writel(BCM2711_DMA40_END, memcpy_chan + BCM2711_DMA40_CS);
spin_unlock_irqrestore(&memcpy_lock, flags);
}
EXPORT_SYMBOL(bcm2711_dma40_memcpy);
static const struct of_device_id bcm2835_dma_of_match[] = {
{ .compatible = "brcm,bcm2835-dma", .data = &bcm2835_dma_cfg },
{ .compatible = "brcm,bcm2711-dma", .data = &bcm2711_dma_cfg },
{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
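/*
 * of_dma translation: the single cell in the client's "dmas" phandle is the
 * peripheral DREQ number, latched into whichever free channel we hand out.
 */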
static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
struct of_dma *ofdma)
{
struct bcm2835_dmadev *d = ofdma->of_dma_data;
struct dma_chan *chan;
chan = dma_get_any_slave_channel(&d->ddev);
if (!chan)
return NULL;
/* Set DREQ from param */
to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
return chan;
}
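/*
 * Probe one controller instance: map its registers, work out which global
 * channel numbers it covers, reserve channels for the legacy and 40-bit
 * memcpy APIs, then register the rest with the dmaengine core.
 */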
static int bcm2835_dma_probe(struct platform_device *pdev)
{
const struct bcm2835_dma_cfg_data *cfg_data;
const struct of_device_id *of_id;
struct bcm2835_dmadev *od;
struct resource *res;
void __iomem *base;
int rc;
int i, j;
int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
	int irq_flags;
	u32 chans_available;
char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];
int chan_count, chan_start, chan_end;
of_id = of_match_node(bcm2835_dma_of_match, pdev->dev.of_node);
if (!of_id) {
dev_err(&pdev->dev, "Failed to match compatible string\n");
return -EINVAL;
}
cfg_data = of_id->data;
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, cfg_data->dma_mask);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set DMA mask\n");
		return rc;
	}

od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
/* The set of channels can be split across multiple instances. */
chan_start = ((u32)(uintptr_t)base / BCM2835_DMA_CHAN_SIZE) & 0xf;
base -= BCM2835_DMA_CHAN(chan_start);
chan_count = resource_size(res) / BCM2835_DMA_CHAN_SIZE;
chan_end = min(chan_start + chan_count,
BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1);
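	/*
	 * Example: with 0x100-byte channel register blocks, an instance whose
	 * mapping starts at offset 0xb00 begins at channel 11; rewinding
	 * 'base' lets BCM2835_DMA_CHANIO(base, i) index channels globally.
	 */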
	od->base = base;
	od->cfg_data = cfg_data;
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
od->ddev.device_tx_status = bcm2835_dma_tx_status;
od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
od->ddev.device_synchronize = bcm2835_dma_synchronize;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
BIT(DMA_MEM_TO_MEM);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.descriptor_reuse = true;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
platform_set_drvdata(pdev, od);
od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
PAGE_SIZE, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
dev_err(&pdev->dev, "Failed to map zero page\n");
return -ENOMEM;
}
/* Request DMA channel mask from device tree */
if (of_property_read_u32(pdev->dev.of_node,
"brcm,dma-channel-mask",
&chans_available)) {
dev_err(&pdev->dev, "Failed to get channel mask\n");
rc = -EINVAL;
goto err_no_dma;
}
#ifdef CONFIG_DMA_BCM2708
/* One channel is reserved for the legacy API */
if (chans_available & BCM2835_DMA_BULK_MASK) {
rc = bcm_dmaman_probe(pdev, base,
chans_available & BCM2835_DMA_BULK_MASK);
if (rc)
dev_err(&pdev->dev,
"Failed to initialize the legacy API\n");
chans_available &= ~BCM2835_DMA_BULK_MASK;
}
#endif
/* And possibly one for the 40-bit DMA memcpy API */
if (chans_available & od->cfg_data->chan_40bit_mask &
BIT(BCM2711_DMA_MEMCPY_CHAN)) {
memcpy_parent = od;
memcpy_chan = BCM2835_DMA_CHANIO(base, BCM2711_DMA_MEMCPY_CHAN);
memcpy_scb = dma_alloc_coherent(memcpy_parent->ddev.dev,
sizeof(*memcpy_scb),
&memcpy_scb_dma, GFP_KERNEL);
if (!memcpy_scb)
dev_warn(&pdev->dev,
"Failed to allocated memcpy scb\n");
chans_available &= ~BIT(BCM2711_DMA_MEMCPY_CHAN);
}
/* get irqs for each channel that we support */
for (i = chan_start; i < chan_end; i++) {
/* skip masked out channels */
if (!(chans_available & (1 << i))) {
irq[i] = -1;
			continue;
		}

/* get the named irq */
snprintf(chan_name, sizeof(chan_name), "dma%i", i);
irq[i] = platform_get_irq_byname(pdev, chan_name);
if (irq[i] >= 0)
continue;
/* legacy device tree case handling */
dev_warn_once(&pdev->dev,
"missing interrupt-names property in device tree - legacy interpretation is used\n");
/*
* in case of channel >= 11
* use the 11th interrupt and that is shared
*/
irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
}
/* get irqs for each channel */
for (i = chan_start; i < chan_end; i++) {
/* skip channels without irq */
if (irq[i] < 0)
continue;
/* check if there are other channels that also use this irq */
		/*
		 * FIXME: This will fail if interrupts are shared across
		 * instances.
		 */
irq_flags = 0;
for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
if ((i != j) && (irq[j] == irq[i])) {
irq_flags = IRQF_SHARED;
break;
}
/* initialize the channel */
rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
if (rc)
goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", chan_count);
/* Device-tree DMA controller registration */
rc = of_dma_controller_register(pdev->dev.of_node,
bcm2835_dma_xlate, od);
if (rc) {
dev_err(&pdev->dev, "Failed to register DMA controller\n");
goto err_no_dma;
}
rc = dma_async_device_register(&od->ddev);
if (rc) {
dev_err(&pdev->dev,
"Failed to register slave DMA engine device: %d\n", rc);
goto err_no_dma;
}
dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
return 0;
err_no_dma:
bcm2835_dma_free(od);
return rc;
}
static int bcm2835_dma_remove(struct platform_device *pdev)
{
struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
bcm_dmaman_remove(pdev);
dma_async_device_unregister(&od->ddev);
if (memcpy_parent == od) {
dma_free_coherent(&pdev->dev, sizeof(*memcpy_scb), memcpy_scb,
memcpy_scb_dma);
memcpy_parent = NULL;
memcpy_scb = NULL;
memcpy_chan = NULL;
}
bcm2835_dma_free(od);
return 0;
}
static struct platform_driver bcm2835_dma_driver = {
.probe = bcm2835_dma_probe,
.remove = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
},
};
static int bcm2835_dma_init(void)
{
return platform_driver_register(&bcm2835_dma_driver);
}
static void bcm2835_dma_exit(void)
{
platform_driver_unregister(&bcm2835_dma_driver);
}
/*
* Load after serial driver (arch_initcall) so we see the messages if it fails,
* but before drivers (module_init) that need a DMA channel.
*/
subsys_initcall(bcm2835_dma_init);
module_exit(bcm2835_dma_exit);
MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");