drivers/dma/apple-admac.c  +1 −1

@@ -585,7 +585,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 	}
 
-	return &ad->channels[index].chan;
+	return dma_get_slave_channel(&ad->channels[index].chan);
 }
 
 static int admac_drain_reports(struct admac_data *ad, int channo)
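For context, a minimal sketch of an of_xlate callback written against the same dmaengine API. The "foo" driver, its channel array and field names are hypothetical and not taken from apple-admac; the point is the one-line fix above: returning the channel through dma_get_slave_channel() lets the core claim it (and refuse a channel that is already in use) instead of handing out a bare pointer.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

struct foo_chan {
	struct dma_chan chan;
};

struct foo_dma {
	unsigned int nchannels;
	struct foo_chan *channels;
};

static struct dma_chan *foo_dma_of_xlate(struct of_phandle_args *dma_spec,
					  struct of_dma *ofdma)
{
	/* Driver data registered with of_dma_controller_register(). */
	struct foo_dma *fd = ofdma->of_dma_data;
	unsigned int index = dma_spec->args[0];

	if (dma_spec->args_count != 1 || index >= fd->nchannels)
		return NULL;

	/* Claim the channel; returns NULL if it is already taken. */
	return dma_get_slave_channel(&fd->channels[index].chan);
}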
drivers/dma/at_hdmac.c  +60 −93

@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 			 ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable. */
+	wmb();
 	dma_writel(atdma, CHER, atchan->mask);
 
 	vdbg_dump_regs(atchan);

@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr, trials;
+	u32 ctrla, dscr;
+	unsigned int i;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then

@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		dscr = channel_readl(atchan, DSCR);
 		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
-		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+		for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 			u32 new_dscr;
 
 			rmb(); /* ensure DSCR is read after CTRLA */

@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 			rmb(); /* ensure DSCR is read before CTRLA */
 			ctrla = channel_readl(atchan, CTRLA);
 		}
-		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 			return -ETIMEDOUT;
 
 		/* for the first descriptor we can be more accurate */

@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
-	/* If the transfer was a memset, free our temporary buffer */
-	if (desc->memset_buffer) {
-		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-			      desc->memset_paddr);
-		desc->memset_buffer = false;
-	}
-
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* move myself to free_list */
-	list_move(&desc->desc_node, &atchan->free_list);
-
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	dma_descriptor_unmap(txd);

@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 
 	dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-	struct at_desc *desc, *_desc;
-	LIST_HEAD(list);
-	unsigned long flags;
-
-	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	if (!list_empty(&atchan->queue))
-		atc_dostart(atchan, atc_first_queued(atchan));
-	/* empty active_list now it is completed */
-	list_splice_init(&atchan->active_list, &list);
-	/* empty queue list by moving descriptors (if any) to active_list */
-	list_splice_init(&atchan->queue, &atchan->active_list);
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* add myself to free_list */
+	list_add(&desc->desc_node, &atchan->free_list);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset_buffer) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset_buffer = false;
+	}
 }
 
 /**

@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	struct at_desc *desc;
 	unsigned long flags;
-	int ret;
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	ret = atc_chan_is_enabled(atchan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
-	if (ret)
-		return;
-
-	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list))
-		return atc_complete_all(atchan);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	atc_chain_complete(atchan, atc_first_active(atchan));
+	desc = atc_first_active(atchan);
+	/* Remove the transfer node from the active list. */
+	list_del_init(&desc->desc_node);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	atc_chain_complete(atchan, desc);
 
 	/* advance work */
 	spin_lock_irqsave(&atchan->lock, flags);
-	atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }

@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
+	struct at_desc *desc;
 	struct at_desc *child;
 	unsigned long flags;

@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	bad_desc = atc_first_active(atchan);
 	list_del_init(&bad_desc->desc_node);
 
-	/* As we are stopped, take advantage to push queued descriptors
-	 * in active_list */
-	list_splice_init(&atchan->queue, atchan->active_list.prev);
-
-	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list))
-		atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 
 	/*
 	 * KERN_CRITICAL may seem harsh, but since this only happens

@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	if (list_empty(&atchan->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-				desc->txd.cookie);
-		atc_dostart(atchan, desc);
-		list_add_tail(&desc->desc_node, &atchan->active_list);
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-				desc->txd.cookie);
 	list_add_tail(&desc->desc_node, &atchan->queue);
-	}
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
+	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+		 desc->txd.cookie);
 
 	return cookie;
 }

@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	int chan_id = atchan->chan_common.chan_id;
-	struct at_desc *desc, *_desc;
 	unsigned long flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	/*

@@ -1468,19 +1428,15 @@
 		cpu_relax();
 
 	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	list_splice_tail_init(&atchan->queue, &atchan->free_list);
+	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
 	clear_bit(ATC_IS_PAUSED, &atchan->status);
 	/* if channel dedicated to cyclic operations, free it */
 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	return 0;
 }

@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-	/* Not needed for cyclic transfers */
-	if (atc_chan_is_cyclic(atchan))
-		return;
-
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
-	atc_advance_work(atchan);
+
+	desc = atc_first_queued(atchan);
+	list_move_tail(&desc->desc_node, &atchan->active_list);
+	atc_dostart(atchan, desc);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }

@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
 	  plat_dat->nr_channels);
 
-	dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_common);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+		goto err_dma_async_device_register;
+	}
 
 	/*
 	 * Do not return an error if the dmac node is not present in order to

@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
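The net effect of the tx_submit and issue_pending changes above is that at_hdmac now follows the usual dmaengine contract: dmaengine_submit() only assigns a cookie and queues the descriptor, and the controller is only started from dma_async_issue_pending(). A minimal client-side sketch of that contract (foo_start_rx, dev, chan, buf and len are placeholder names, not taken from the patch):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int foo_start_rx(struct device *dev, struct dma_chan *chan,
			void *buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_addr_t dma_buf;
	dma_cookie_t cookie;

	dma_buf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_buf))
		return -ENOMEM;

	desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
		return -EINVAL;
	}

	/* Only assigns a cookie and queues the descriptor... */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
		return cookie;
	}

	/* ...nothing may touch the hardware until this call. */
	dma_async_issue_pending(chan);

	return 0;
}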
"slave " : "", plat_dat->nr_channels); dma_async_device_register(&atdma->dma_common); err = dma_async_device_register(&atdma->dma_common); if (err) { dev_err(&pdev->dev, "Unable to register: %d.\n", err); goto err_dma_async_device_register; } /* * Do not return an error if the dmac node is not present in order to Loading @@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev) err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: dma_pool_destroy(atdma->dma_desc_pool); Loading drivers/dma/at_hdmac_regs.h +5 −5 Original line number Diff line number Diff line Loading @@ -186,13 +186,13 @@ /* LLI == Linked List Item; aka DMA buffer descriptor */ struct at_lli { /* values that are not changed by hardware */ dma_addr_t saddr; dma_addr_t daddr; u32 saddr; u32 daddr; /* value that may get written back: */ u32 ctrla; /* more values that are not changed by hardware */ u32 ctrlb; dma_addr_t dscr; /* chain to next lli */ u32 dscr; /* chain to next lli */ }; /** Loading drivers/dma/idxd/cdev.c +18 −0 Original line number Diff line number Diff line Loading @@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace * can directly access physical address through the WQ. * - The IDXD cdev driver does not provide any ways to pin * user pages and translate the address from user VA to IOVA or * PA without IOMMU SVA. Therefore the application has no way * to instruct the device to perform DMA function. This makes * the cdev not usable for normal application usage. */ if (!device_user_pasid_enabled(idxd)) { idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU; dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); return -EOPNOTSUPP; } mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_USER; rc = drv_enable_wq(wq); Loading drivers/dma/idxd/device.c +17 −9 Original line number Diff line number Diff line Loading @@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); memset(wq->name, 0, WQ_NAME_SIZE); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); if (wq->opcap_bmap) bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } Loading Loading @@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) void idxd_device_clear_state(struct idxd_device *idxd) { if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return; /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { /* * Clearing wq state is protected by wq lock. * So no need to be protected by device lock. 
drivers/dma/idxd/device.c  +17 −9

@@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
 	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-	wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
 	if (wq->opcap_bmap)
 		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 }

@@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
-	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-		return;
+	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		/*
+		 * Clearing wq state is protected by wq lock.
+		 * So no need to be protected by device lock.
+		 */
 		idxd_device_wqs_clear_state(idxd);
 
 		spin_lock(&idxd->dev_lock);
 		idxd_groups_clear_state(idxd);
 		idxd_engines_clear_state(idxd);
+	} else {
+		spin_lock(&idxd->dev_lock);
+	}
+
 	idxd->state = IDXD_DEV_DISABLED;
 	spin_unlock(&idxd->dev_lock);
 }

@@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 
 	/* bytes 12-15 */
 	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
-	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
 
 	/* bytes 32-63 */
 	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {

@@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
 	wq->priority = wq->wqcfg->priority;
 
 	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
-	wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
 
 	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
 		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
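The two max-batch helpers used by these hunks are defined outside this diff (in the driver's idxd.h). A plausible sketch of their shape, assuming the intent is to special-case Intel IAA (IDXD_TYPE_IAX) devices, which do not support batch descriptors, so their batch limit and WQCFG max_batch_shift field are never programmed:

/* Sketch only; not taken from the hunks above. */
static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wq->max_batch_size = 1;		/* batching not supported on IAA */
	else
		wq->max_batch_size = max_batch_size;
}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{
	if (idxd_type != IDXD_TYPE_IAX)
		wqcfg->max_batch_shift = max_batch_shift;
}

Routing every assignment of wq->max_batch_size and wqcfg->max_batch_shift through one pair of helpers keeps the device-type check in a single place instead of scattering it across disable_cleanup, config_write and load_config.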