Searched refs:async_tx (Results 1 – 20 of 20) sorted by relevance

/linux-3.4.99/drivers/dma/
mv_xor.c
42 container_of(tx, struct mv_xor_desc_slot, async_tx)
283 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); in mv_xor_start_new_chain()
293 BUG_ON(desc->async_tx.cookie < 0); in mv_xor_run_tx_complete_actions()
295 if (desc->async_tx.cookie > 0) { in mv_xor_run_tx_complete_actions()
296 cookie = desc->async_tx.cookie; in mv_xor_run_tx_complete_actions()
301 if (desc->async_tx.callback) in mv_xor_run_tx_complete_actions()
302 desc->async_tx.callback( in mv_xor_run_tx_complete_actions()
303 desc->async_tx.callback_param); in mv_xor_run_tx_complete_actions()
313 enum dma_ctrl_flags flags = desc->async_tx.flags; in mv_xor_run_tx_complete_actions()
345 dma_run_dependencies(&desc->async_tx); in mv_xor_run_tx_complete_actions()
[all …]
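
The mv_xor.c hits trace the completion path shared by most of these drivers: check the descriptor's cookie, run the client callback, then release any dependent transactions. A minimal, driver-neutral paraphrase of that path (struct foo_desc is a hypothetical stand-in for mv_xor_desc_slot):

    static dma_cookie_t foo_run_tx_complete_actions(struct foo_desc *desc,
                                                    dma_cookie_t cookie)
    {
            BUG_ON(desc->async_tx.cookie < 0);

            if (desc->async_tx.cookie > 0) {
                    cookie = desc->async_tx.cookie;

                    /* Let the client know this transaction is done. */
                    if (desc->async_tx.callback)
                            desc->async_tx.callback(
                                    desc->async_tx.callback_param);

                    /* ... unmap buffers here, honoring desc->async_tx.flags ... */
            }

            /* Kick off any transactions chained behind this one. */
            dma_run_dependencies(&desc->async_tx);

            return cookie;
    }
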
iop-adma.c
45 container_of(tx, struct iop_adma_desc_slot, async_tx)
67 struct dma_async_tx_descriptor *tx = &desc->async_tx; in iop_desc_unmap()
103 struct dma_async_tx_descriptor *tx = &desc->async_tx; in iop_desc_unmap_pq()
142 struct dma_async_tx_descriptor *tx = &desc->async_tx; in iop_adma_run_tx_complete_actions()
179 if (!async_tx_test_ack(&desc->async_tx)) in iop_adma_clean_slot()
214 iter->async_tx.cookie, iter->idx, busy, in __iop_adma_slot_cleanup()
215 iter->async_tx.phys, iop_desc_get_next_desc(iter), in __iop_adma_slot_cleanup()
216 async_tx_test_ack(&iter->async_tx)); in __iop_adma_slot_cleanup()
218 prefetch(&_iter->async_tx); in __iop_adma_slot_cleanup()
231 if (iter->async_tx.phys == current_desc) { in __iop_adma_slot_cleanup()
[all …]
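
iop_adma_clean_slot() (hit at line 179) encodes the recycling rule these drivers follow: a descriptor slot may only be reclaimed after the client has acknowledged it. The check, in sketch form:

    /* Do not recycle a descriptor the client has not acked yet. */
    if (!async_tx_test_ack(&desc->async_tx))
            return 0;
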
fsldma.c
393 set_desc_next(chan, &tail->hw, desc->async_tx.phys); in append_ld_queue()
418 cookie = dma_cookie_assign(&child->async_tx); in fsl_dma_tx_submit()
448 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in fsl_dma_alloc_descriptor()
449 desc->async_tx.tx_submit = fsl_dma_tx_submit; in fsl_dma_alloc_descriptor()
450 desc->async_tx.phys = pdesc; in fsl_dma_alloc_descriptor()
508 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsldma_free_desc_list()
522 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsldma_free_desc_list_reverse()
562 new->async_tx.cookie = -EBUSY; in fsl_dma_prep_interrupt()
563 new->async_tx.flags = flags; in fsl_dma_prep_interrupt()
571 return &new->async_tx; in fsl_dma_prep_interrupt()
[all …]
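
The fsldma.c hits at lines 448-450 show the canonical descriptor setup: initialize the embedded generic descriptor, install the driver's tx_submit hook, and record the dma_pool address in .phys so the descriptor can later be chained and freed. A sketch along those lines (the foo_* names are illustrative):

    struct foo_desc *desc;
    dma_addr_t pdesc;

    desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
    if (!desc)
            return NULL;
    memset(desc, 0, sizeof(*desc));

    dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
    desc->async_tx.tx_submit = foo_tx_submit; /* driver's submit hook */
    desc->async_tx.phys = pdesc;              /* for chaining and dma_pool_free() */

The prep hits at lines 562-563 then mark the fresh descriptor with cookie = -EBUSY until submit assigns a real cookie.
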
shdma.c
310 chunk->async_tx.cookie > 0 || in sh_dmae_tx_submit()
311 chunk->async_tx.cookie == -EBUSY || in sh_dmae_tx_submit()
316 chunk->async_tx.callback = NULL; in sh_dmae_tx_submit()
322 last->async_tx.callback = callback; in sh_dmae_tx_submit()
323 last->async_tx.callback_param = tx->callback_param; in sh_dmae_tx_submit()
326 tx->cookie, &last->async_tx, sh_chan->id, in sh_dmae_tx_submit()
431 dma_async_tx_descriptor_init(&desc->async_tx, in sh_dmae_alloc_chan_resources()
433 desc->async_tx.tx_submit = sh_dmae_tx_submit; in sh_dmae_alloc_chan_resources()
534 new->async_tx.cookie = -EBUSY; in sh_dmae_add_desc()
538 new->async_tx.cookie = -EINVAL; in sh_dmae_add_desc()
[all …]
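
sh_dmae_tx_submit() (hits at lines 316-323) shows how a transfer split into hardware-sized chunks reports completion: intermediate chunks have their callbacks cleared so only the last chunk in the chain notifies the client. Roughly (paraphrased, not the driver's exact loop):

    list_for_each_entry(chunk, &sh_chan->ld_queue, node) {
            chunk->async_tx.callback = NULL;  /* silence intermediates */
            last = chunk;
    }
    /* Only the tail of the chain carries the client's callback. */
    last->async_tx.callback = callback;
    last->async_tx.callback_param = tx->callback_param;
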
fsldma.h
101 struct dma_async_tx_descriptor async_tx; member
161 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
shdma.h
62 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
mv_xor.h
145 struct dma_async_tx_descriptor async_tx; member
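
Each driver embeds the generic descriptor in its private slot and recovers the slot with container_of(), as tx_to_fsl_desc() and tx_to_sh_desc() above show. The pattern, with a hypothetical foo driver:

    struct foo_desc {
            /* hardware descriptor, list linkage, driver state ... */
            struct dma_async_tx_descriptor async_tx; /* generic part, embedded */
    };

    #define tx_to_foo_desc(tx) container_of(tx, struct foo_desc, async_tx)
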
Kconfig
281 bool "Async_tx: Offload support for the async_tx api"
284 This allows the async_tx api to take advantage of offload engines for
/linux-3.4.99/Documentation/crypto/
async-tx-api.txt
26 The async_tx API provides methods for describing a chain of asynchronous
87 async_tx call will implicitly set the acknowledged state.
163 See include/linux/async_tx.h for more information on the flags. See the
171 accommodate assumptions made by applications using the async_tx API:
222 include/linux/async_tx.h: core header file for the async_tx api
223 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
224 crypto/async_tx/async_memcpy.c: copy offload
225 crypto/async_tx/async_memset.c: memory fill offload
226 crypto/async_tx/async_xor.c: xor and xor zero sum offload
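
async-tx-api.txt documents the client side of the API indexed above. A condensed usage sketch in the spirit of that document's own example (dest, srcs, len and NDISKS are placeholders; flags mirrored from the document's xor example, signatures as in 3.4's include/linux/async_tx.h):

    static void xor_done(void *param)
    {
            struct completion *cmp = param;

            complete(cmp);
    }

    static void run_one_xor(struct page *dest, struct page **srcs, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            struct async_submit_ctl submit;
            addr_conv_t addr_conv[NDISKS];
            struct completion cmp;

            init_completion(&cmp);
            init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
                              NULL, xor_done, &cmp, addr_conv);
            /* tx could seed a dependency chain via a later
             * init_async_submit(..., tx, ...) call. */
            tx = async_xor(dest, srcs, 0, NDISKS, len, &submit);

            async_tx_issue_pending_all(); /* flush pending work to hardware */
            wait_for_completion(&cmp);
    }
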
/linux-3.4.99/drivers/dma/ppc4xx/
adma.c
1723 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { in ppc440spe_adma_unmap()
1734 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { in ppc440spe_adma_unmap()
1756 BUG_ON(desc->async_tx.cookie < 0); in ppc440spe_adma_run_tx_complete_actions()
1757 if (desc->async_tx.cookie > 0) { in ppc440spe_adma_run_tx_complete_actions()
1758 cookie = desc->async_tx.cookie; in ppc440spe_adma_run_tx_complete_actions()
1759 desc->async_tx.cookie = 0; in ppc440spe_adma_run_tx_complete_actions()
1764 if (desc->async_tx.callback) in ppc440spe_adma_run_tx_complete_actions()
1765 desc->async_tx.callback( in ppc440spe_adma_run_tx_complete_actions()
1766 desc->async_tx.callback_param); in ppc440spe_adma_run_tx_complete_actions()
1791 dma_run_dependencies(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
[all …]
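
ppc440spe_adma_unmap() (hits at lines 1723 and 1734) checks the transaction flags before unmapping, so clients that manage their own DMA mappings can opt out. Condensed:

    enum dma_ctrl_flags flags = desc->async_tx.flags;

    if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
            /* dma_unmap_page(dev, dest_addr, len, DMA_FROM_DEVICE); ... */
    }
    if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
            /* dma_unmap_page(dev, src_addr, len, DMA_TO_DEVICE); ... */
    }
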
adma.h
23 container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
150 struct dma_async_tx_descriptor async_tx; member
/linux-3.4.99/crypto/async_tx/
Makefile
1 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
/linux-3.4.99/arch/arm/include/asm/hardware/
iop_adma.h
105 struct dma_async_tx_descriptor async_tx; member
iop3xx-adma.h
631 (u32) (desc->async_tx.phys + (i << 5)); in iop_desc_init_zero_sum()
/linux-3.4.99/include/linux/
sh_dma.h
32 struct dma_async_tx_descriptor async_tx; member
/linux-3.4.99/crypto/
Makefile
98 obj-$(CONFIG_ASYNC_CORE) += async_tx/
Kconfig
8 # async_tx api: hardware offloaded memory transfer/transform support
10 source "crypto/async_tx/Kconfig"
/linux-3.4.99/Documentation/
dmaengine.txt
6 NOTE: For DMA Engine usage in async_tx please see:
120 Although the async_tx API specifies that completion callback
/linux-3.4.99/drivers/tty/serial/
sh-sci.c
1397 async_tx); in work_fn_rx()
/linux-3.4.99/
MAINTAINERS
1225 F: crypto/async_tx/
1228 F: include/linux/async_tx.h
2301 T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git