19ab65affSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */ 2c13c8260SChris Leech /* 3c13c8260SChris Leech * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 4c13c8260SChris Leech */ 5d2ebfb33SRussell King - ARM Linux #ifndef LINUX_DMAENGINE_H 6d2ebfb33SRussell King - ARM Linux #define LINUX_DMAENGINE_H 71c0f16e5SDavid Woodhouse 8c13c8260SChris Leech #include <linux/device.h> 90ad7c000SStephen Warren #include <linux/err.h> 10c13c8260SChris Leech #include <linux/uio.h> 11187f1882SPaul Gortmaker #include <linux/bug.h> 1290b44f8fSVinod Koul #include <linux/scatterlist.h> 13a8efa9d6SPaul Gortmaker #include <linux/bitmap.h> 14dcc043dcSViresh Kumar #include <linux/types.h> 15a8efa9d6SPaul Gortmaker #include <asm/page.h> 16b7f080cfSAlexey Dobriyan 17c13c8260SChris Leech /** 18fe4ada2dSRandy Dunlap * typedef dma_cookie_t - an opaque DMA cookie 19c13c8260SChris Leech * 20c13c8260SChris Leech * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code 21c13c8260SChris Leech */ 22c13c8260SChris Leech typedef s32 dma_cookie_t; 2376bd061fSSteven J. Magnani #define DMA_MIN_COOKIE 1 24c13c8260SChris Leech 2571ea1483SDan Carpenter static inline int dma_submit_error(dma_cookie_t cookie) 2671ea1483SDan Carpenter { 2771ea1483SDan Carpenter return cookie < 0 ? cookie : 0; 2871ea1483SDan Carpenter } 29c13c8260SChris Leech 30c13c8260SChris Leech /** 31c13c8260SChris Leech * enum dma_status - DMA transaction status 32adfedd9aSVinod Koul * @DMA_COMPLETE: transaction completed 33c13c8260SChris Leech * @DMA_IN_PROGRESS: transaction not yet processed 3407934481SLinus Walleij * @DMA_PAUSED: transaction is paused 35c13c8260SChris Leech * @DMA_ERROR: transaction failed 36c13c8260SChris Leech */ 37c13c8260SChris Leech enum dma_status { 387db5f727SVinod Koul DMA_COMPLETE, 39c13c8260SChris Leech DMA_IN_PROGRESS, 4007934481SLinus Walleij DMA_PAUSED, 41c13c8260SChris Leech DMA_ERROR, 42c13c8260SChris Leech }; 43c13c8260SChris Leech 44c13c8260SChris Leech /** 457405f74bSDan Williams * enum dma_transaction_type - DMA transaction types/indexes 46138f4c35SDan Williams * 47138f4c35SDan Williams * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is 48138f4c35SDan Williams * automatically set as dma devices are registered. 
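 *
 * A minimal sketch of how a client might request a channel able to perform
 * one of these transaction types (the DMA_MEMCPY choice and the bare-bones
 * error handling are illustrative only):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		dma_release_channel(chan);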
497405f74bSDan Williams */
507405f74bSDan Williams enum dma_transaction_type {
517405f74bSDan Williams DMA_MEMCPY,
527405f74bSDan Williams DMA_XOR,
53b2f46fd8SDan Williams DMA_PQ,
54099f53cbSDan Williams DMA_XOR_VAL,
55099f53cbSDan Williams DMA_PQ_VAL,
564983a501SMaxime Ripard DMA_MEMSET,
5750c7cd2bSMaxime Ripard DMA_MEMSET_SG,
587405f74bSDan Williams DMA_INTERRUPT,
5959b5ec21SDan Williams DMA_PRIVATE,
60138f4c35SDan Williams DMA_ASYNC_TX,
61dc0ee643SHaavard Skinnemoen DMA_SLAVE,
62782bc950SSascha Hauer DMA_CYCLIC,
63b14dab79SJassi Brar DMA_INTERLEAVE,
647405f74bSDan Williams /* last transaction type for creation of the capabilities mask */
65b14dab79SJassi Brar DMA_TX_TYPE_END,
66b14dab79SJassi Brar };
67dc0ee643SHaavard Skinnemoen 
6849920bc6SVinod Koul /**
6949920bc6SVinod Koul * enum dma_transfer_direction - dma transfer mode and direction indicator
7049920bc6SVinod Koul * @DMA_MEM_TO_MEM: Async/Memcpy mode
7149920bc6SVinod Koul * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
7249920bc6SVinod Koul * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
7349920bc6SVinod Koul * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
7449920bc6SVinod Koul */
7549920bc6SVinod Koul enum dma_transfer_direction {
7649920bc6SVinod Koul DMA_MEM_TO_MEM,
7749920bc6SVinod Koul DMA_MEM_TO_DEV,
7849920bc6SVinod Koul DMA_DEV_TO_MEM,
7949920bc6SVinod Koul DMA_DEV_TO_DEV,
8062268ce9SShawn Guo DMA_TRANS_NONE,
8149920bc6SVinod Koul };
827405f74bSDan Williams 
837405f74bSDan Williams /**
84b14dab79SJassi Brar * Interleaved Transfer Request
85b14dab79SJassi Brar * ----------------------------
86b14dab79SJassi Brar * A chunk is a collection of contiguous bytes to be transferred.
87b14dab79SJassi Brar * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
88b14dab79SJassi Brar * ICGs may or may not change between chunks.
89b14dab79SJassi Brar * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
90b14dab79SJassi Brar * that when repeated an integral number of times, specifies the transfer.
91b14dab79SJassi Brar * A transfer template is a specification of a Frame, the number of times
92b14dab79SJassi Brar * it is to be repeated and other per-transfer attributes.
93b14dab79SJassi Brar *
94b14dab79SJassi Brar * Practically, a client driver would have ready a template for each
95b14dab79SJassi Brar * type of transfer it is going to need during its lifetime and
96b14dab79SJassi Brar * set only 'src_start' and 'dst_start' before submitting the requests.
97b14dab79SJassi Brar *
98b14dab79SJassi Brar *
99b14dab79SJassi Brar * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
100b14dab79SJassi Brar * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
101b14dab79SJassi Brar *
102b14dab79SJassi Brar * == Chunk size
103b14dab79SJassi Brar * ... ICG
104b14dab79SJassi Brar */
105b14dab79SJassi Brar 
106b14dab79SJassi Brar /**
107b14dab79SJassi Brar * struct data_chunk - Element of scatter-gather list that makes a frame.
108b14dab79SJassi Brar * @size: Number of bytes to read from source.
109b14dab79SJassi Brar * size_dst := fn(op, size_src), so doesn't mean much for destination.
110b14dab79SJassi Brar * @icg: Number of bytes to jump after last src/dst address of this
111b14dab79SJassi Brar * chunk and before first src/dst address for next chunk.
112b14dab79SJassi Brar * Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
113b14dab79SJassi Brar * Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
114e1031dc1SMaxime Ripard * @dst_icg: Number of bytes to jump after last dst address of this 115e1031dc1SMaxime Ripard * chunk and before the first dst address for next chunk. 116e1031dc1SMaxime Ripard * Ignored if dst_inc is true and dst_sgl is false. 117e1031dc1SMaxime Ripard * @src_icg: Number of bytes to jump after last src address of this 118e1031dc1SMaxime Ripard * chunk and before the first src address for next chunk. 119e1031dc1SMaxime Ripard * Ignored if src_inc is true and src_sgl is false. 120b14dab79SJassi Brar */ 121b14dab79SJassi Brar struct data_chunk { 122b14dab79SJassi Brar size_t size; 123b14dab79SJassi Brar size_t icg; 124e1031dc1SMaxime Ripard size_t dst_icg; 125e1031dc1SMaxime Ripard size_t src_icg; 126b14dab79SJassi Brar }; 127b14dab79SJassi Brar 128b14dab79SJassi Brar /** 129b14dab79SJassi Brar * struct dma_interleaved_template - Template to convey DMAC the transfer pattern 130b14dab79SJassi Brar * and attributes. 131b14dab79SJassi Brar * @src_start: Bus address of source for the first chunk. 132b14dab79SJassi Brar * @dst_start: Bus address of destination for the first chunk. 133b14dab79SJassi Brar * @dir: Specifies the type of Source and Destination. 134b14dab79SJassi Brar * @src_inc: If the source address increments after reading from it. 135b14dab79SJassi Brar * @dst_inc: If the destination address increments after writing to it. 136b14dab79SJassi Brar * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). 137b14dab79SJassi Brar * Otherwise, source is read contiguously (icg ignored). 138b14dab79SJassi Brar * Ignored if src_inc is false. 139b14dab79SJassi Brar * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). 140b14dab79SJassi Brar * Otherwise, destination is filled contiguously (icg ignored). 141b14dab79SJassi Brar * Ignored if dst_inc is false. 142b14dab79SJassi Brar * @numf: Number of frames in this template. 143b14dab79SJassi Brar * @frame_size: Number of chunks in a frame i.e, size of sgl[]. 144b14dab79SJassi Brar * @sgl: Array of {chunk,icg} pairs that make up a frame. 145b14dab79SJassi Brar */ 146b14dab79SJassi Brar struct dma_interleaved_template { 147b14dab79SJassi Brar dma_addr_t src_start; 148b14dab79SJassi Brar dma_addr_t dst_start; 149b14dab79SJassi Brar enum dma_transfer_direction dir; 150b14dab79SJassi Brar bool src_inc; 151b14dab79SJassi Brar bool dst_inc; 152b14dab79SJassi Brar bool src_sgl; 153b14dab79SJassi Brar bool dst_sgl; 154b14dab79SJassi Brar size_t numf; 155b14dab79SJassi Brar size_t frame_size; 156b14dab79SJassi Brar struct data_chunk sgl[0]; 157b14dab79SJassi Brar }; 158b14dab79SJassi Brar 159b14dab79SJassi Brar /** 160636bdeaaSDan Williams * enum dma_ctrl_flags - DMA flags to augment operation preparation, 161636bdeaaSDan Williams * control completion, and communicate status. 162d4c56f97SDan Williams * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of 163d4c56f97SDan Williams * this transaction 164a88f6667SGuennadi Liakhovetski * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client 165b2f46fd8SDan Williams * acknowledges receipt, i.e. 
has a chance to establish any dependency
166b2f46fd8SDan Williams * chains
167b2f46fd8SDan Williams * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
168b2f46fd8SDan Williams * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
169b2f46fd8SDan Williams * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
170b2f46fd8SDan Williams * sources that were the result of a previous operation, in the case of a PQ
171b2f46fd8SDan Williams * operation it continues the calculation with new sources
1720403e382SDan Williams * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
1730403e382SDan Williams * on the result of this operation
17427242021SVinod Koul * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
17527242021SVinod Koul * cleared or freed
1763e00ab4aSAbhishek Sahu * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
1773e00ab4aSAbhishek Sahu * data and the descriptor should be in a different format from normal
1783e00ab4aSAbhishek Sahu * data descriptors.
179d4c56f97SDan Williams */
180636bdeaaSDan Williams enum dma_ctrl_flags {
181d4c56f97SDan Williams DMA_PREP_INTERRUPT = (1 << 0),
182636bdeaaSDan Williams DMA_CTRL_ACK = (1 << 1),
1830776ae7bSBartlomiej Zolnierkiewicz DMA_PREP_PQ_DISABLE_P = (1 << 2),
1840776ae7bSBartlomiej Zolnierkiewicz DMA_PREP_PQ_DISABLE_Q = (1 << 3),
1850776ae7bSBartlomiej Zolnierkiewicz DMA_PREP_CONTINUE = (1 << 4),
1860776ae7bSBartlomiej Zolnierkiewicz DMA_PREP_FENCE = (1 << 5),
18727242021SVinod Koul DMA_CTRL_REUSE = (1 << 6),
1883e00ab4aSAbhishek Sahu DMA_PREP_CMD = (1 << 7),
189d4c56f97SDan Williams };
190d4c56f97SDan Williams 
191d4c56f97SDan Williams /**
192ad283ea4SDan Williams * enum sum_check_bits - bit position of sum_check_flags
193ad283ea4SDan Williams */
194ad283ea4SDan Williams enum sum_check_bits {
195ad283ea4SDan Williams SUM_CHECK_P = 0,
196ad283ea4SDan Williams SUM_CHECK_Q = 1,
197ad283ea4SDan Williams };
198ad283ea4SDan Williams 
199ad283ea4SDan Williams /**
200ad283ea4SDan Williams * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
201ad283ea4SDan Williams * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
202ad283ea4SDan Williams * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
203ad283ea4SDan Williams */
204ad283ea4SDan Williams enum sum_check_flags {
205ad283ea4SDan Williams SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
206ad283ea4SDan Williams SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
207ad283ea4SDan Williams };
208ad283ea4SDan Williams 
209ad283ea4SDan Williams 
210ad283ea4SDan Williams /**
2117405f74bSDan Williams * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
2127405f74bSDan Williams * See linux/cpumask.h
2137405f74bSDan Williams */
2147405f74bSDan Williams typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
2157405f74bSDan Williams 
2167405f74bSDan Williams /**
217c13c8260SChris Leech * struct dma_chan_percpu - the per-CPU part of struct dma_chan
218c13c8260SChris Leech * @memcpy_count: transaction counter
219c13c8260SChris Leech * @bytes_transferred: byte counter
220c13c8260SChris Leech */
221c13c8260SChris Leech 
2224db8fd32SPeter Ujfalusi /**
2234db8fd32SPeter Ujfalusi * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
2244db8fd32SPeter Ujfalusi * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
2254db8fd32SPeter Ujfalusi * client driver and it is attached (via the dmaengine_desc_attach_metadata()
2264db8fd32SPeter Ujfalusi * helper) to the descriptor.
2274db8fd32SPeter Ujfalusi *
2284db8fd32SPeter Ujfalusi * Client drivers interested in using this mode can follow:
2294db8fd32SPeter Ujfalusi * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
2304db8fd32SPeter Ujfalusi * 1. prepare the descriptor (dmaengine_prep_*)
2314db8fd32SPeter Ujfalusi * construct the metadata in the client's buffer
2324db8fd32SPeter Ujfalusi * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
2334db8fd32SPeter Ujfalusi * descriptor
2344db8fd32SPeter Ujfalusi * 3. submit the transfer
2354db8fd32SPeter Ujfalusi * - DMA_DEV_TO_MEM:
2364db8fd32SPeter Ujfalusi * 1. prepare the descriptor (dmaengine_prep_*)
2374db8fd32SPeter Ujfalusi * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
2384db8fd32SPeter Ujfalusi * descriptor
2394db8fd32SPeter Ujfalusi * 3. submit the transfer
2404db8fd32SPeter Ujfalusi * 4. when the transfer is completed, the metadata should be available in the
2414db8fd32SPeter Ujfalusi * attached buffer
2424db8fd32SPeter Ujfalusi *
2434db8fd32SPeter Ujfalusi * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
2444db8fd32SPeter Ujfalusi * driver. The client driver can ask for the pointer, maximum size and the
2454db8fd32SPeter Ujfalusi * currently used size of the metadata and can directly update or read it.
2464db8fd32SPeter Ujfalusi * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
2474db8fd32SPeter Ujfalusi * provided as helper functions.
2484db8fd32SPeter Ujfalusi *
2494db8fd32SPeter Ujfalusi * Note: the metadata area for the descriptor is no longer valid after the
2504db8fd32SPeter Ujfalusi * transfer has been completed (valid up to the point when the completion
2514db8fd32SPeter Ujfalusi * callback returns if used).
2524db8fd32SPeter Ujfalusi *
2534db8fd32SPeter Ujfalusi * Client drivers interested in using this mode can follow:
2544db8fd32SPeter Ujfalusi * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
2554db8fd32SPeter Ujfalusi * 1. prepare the descriptor (dmaengine_prep_*)
2564db8fd32SPeter Ujfalusi * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
2574db8fd32SPeter Ujfalusi * metadata area
2584db8fd32SPeter Ujfalusi * 3. update the metadata at the pointer
2594db8fd32SPeter Ujfalusi * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
2604db8fd32SPeter Ujfalusi * of data the client has placed into the metadata buffer
2614db8fd32SPeter Ujfalusi * 5. submit the transfer
2624db8fd32SPeter Ujfalusi * - DMA_DEV_TO_MEM:
2634db8fd32SPeter Ujfalusi * 1. prepare the descriptor (dmaengine_prep_*)
2644db8fd32SPeter Ujfalusi * 2. submit the transfer
2654db8fd32SPeter Ujfalusi * 3.
on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the 2664db8fd32SPeter Ujfalusi * pointer to the engine's metadata area 2674db8fd32SPeter Ujfalusi * 4. Read out the metadata from the pointer 2684db8fd32SPeter Ujfalusi * 2694db8fd32SPeter Ujfalusi * Note: the two mode is not compatible and clients must use one mode for a 2704db8fd32SPeter Ujfalusi * descriptor. 2714db8fd32SPeter Ujfalusi */ 2724db8fd32SPeter Ujfalusi enum dma_desc_metadata_mode { 2734db8fd32SPeter Ujfalusi DESC_METADATA_NONE = 0, 2744db8fd32SPeter Ujfalusi DESC_METADATA_CLIENT = BIT(0), 2754db8fd32SPeter Ujfalusi DESC_METADATA_ENGINE = BIT(1), 2764db8fd32SPeter Ujfalusi }; 2774db8fd32SPeter Ujfalusi 278c13c8260SChris Leech struct dma_chan_percpu { 279c13c8260SChris Leech /* stats */ 280c13c8260SChris Leech unsigned long memcpy_count; 281c13c8260SChris Leech unsigned long bytes_transferred; 282c13c8260SChris Leech }; 283c13c8260SChris Leech 284c13c8260SChris Leech /** 28556f13c0dSPeter Ujfalusi * struct dma_router - DMA router structure 28656f13c0dSPeter Ujfalusi * @dev: pointer to the DMA router device 28756f13c0dSPeter Ujfalusi * @route_free: function to be called when the route can be disconnected 28856f13c0dSPeter Ujfalusi */ 28956f13c0dSPeter Ujfalusi struct dma_router { 29056f13c0dSPeter Ujfalusi struct device *dev; 29156f13c0dSPeter Ujfalusi void (*route_free)(struct device *dev, void *route_data); 29256f13c0dSPeter Ujfalusi }; 29356f13c0dSPeter Ujfalusi 29456f13c0dSPeter Ujfalusi /** 295c13c8260SChris Leech * struct dma_chan - devices supply DMA channels, clients use them 296fe4ada2dSRandy Dunlap * @device: ptr to the dma device who supplies this channel, always !%NULL 29771723a96SGeert Uytterhoeven * @slave: ptr to the device using this channel 298c13c8260SChris Leech * @cookie: last cookie value returned to client 2994d4e58deSRussell King - ARM Linux * @completed_cookie: last completed cookie for this channel 300fe4ada2dSRandy Dunlap * @chan_id: channel ID for sysfs 30141d5e59cSDan Williams * @dev: class device for sysfs 30271723a96SGeert Uytterhoeven * @name: backlink name for sysfs 303c13c8260SChris Leech * @device_node: used to add this to the device chan list 304c13c8260SChris Leech * @local: per-cpu pointer to a struct dma_chan_percpu 305868d2ee2SVinod Koul * @client_count: how many clients are using this channel 306bec08513SDan Williams * @table_count: number of appearances in the mem-to-mem allocation table 30756f13c0dSPeter Ujfalusi * @router: pointer to the DMA router structure 30856f13c0dSPeter Ujfalusi * @route_data: channel specific data for the router 309287d8592SDan Williams * @private: private data for certain client-channel associations 310c13c8260SChris Leech */ 311c13c8260SChris Leech struct dma_chan { 312c13c8260SChris Leech struct dma_device *device; 31371723a96SGeert Uytterhoeven struct device *slave; 314c13c8260SChris Leech dma_cookie_t cookie; 3154d4e58deSRussell King - ARM Linux dma_cookie_t completed_cookie; 316c13c8260SChris Leech 317c13c8260SChris Leech /* sysfs */ 318c13c8260SChris Leech int chan_id; 31941d5e59cSDan Williams struct dma_chan_dev *dev; 32071723a96SGeert Uytterhoeven const char *name; 321c13c8260SChris Leech 322c13c8260SChris Leech struct list_head device_node; 323a29d8b8eSTejun Heo struct dma_chan_percpu __percpu *local; 3247cc5bf9aSDan Williams int client_count; 325bec08513SDan Williams int table_count; 32656f13c0dSPeter Ujfalusi 32756f13c0dSPeter Ujfalusi /* DMA router */ 32856f13c0dSPeter Ujfalusi struct dma_router *router; 32956f13c0dSPeter Ujfalusi 
void *route_data; 33056f13c0dSPeter Ujfalusi 331287d8592SDan Williams void *private; 332c13c8260SChris Leech }; 333c13c8260SChris Leech 33441d5e59cSDan Williams /** 33541d5e59cSDan Williams * struct dma_chan_dev - relate sysfs device node to backing channel device 336868d2ee2SVinod Koul * @chan: driver channel device 337868d2ee2SVinod Koul * @device: sysfs device 338868d2ee2SVinod Koul * @dev_id: parent dma_device dev_id 339868d2ee2SVinod Koul * @idr_ref: reference count to gate release of dma_device dev_id 34041d5e59cSDan Williams */ 34141d5e59cSDan Williams struct dma_chan_dev { 34241d5e59cSDan Williams struct dma_chan *chan; 34341d5e59cSDan Williams struct device device; 344864498aaSDan Williams int dev_id; 345864498aaSDan Williams atomic_t *idr_ref; 34641d5e59cSDan Williams }; 34741d5e59cSDan Williams 348c156d0a5SLinus Walleij /** 349ba730340SAlexander Popov * enum dma_slave_buswidth - defines bus width of the DMA slave 350c156d0a5SLinus Walleij * device, source or target buses 351c156d0a5SLinus Walleij */ 352c156d0a5SLinus Walleij enum dma_slave_buswidth { 353c156d0a5SLinus Walleij DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, 354c156d0a5SLinus Walleij DMA_SLAVE_BUSWIDTH_1_BYTE = 1, 355c156d0a5SLinus Walleij DMA_SLAVE_BUSWIDTH_2_BYTES = 2, 35693c6ee94SPeter Ujfalusi DMA_SLAVE_BUSWIDTH_3_BYTES = 3, 357c156d0a5SLinus Walleij DMA_SLAVE_BUSWIDTH_4_BYTES = 4, 358c156d0a5SLinus Walleij DMA_SLAVE_BUSWIDTH_8_BYTES = 8, 359534a7298SLaurent Pinchart DMA_SLAVE_BUSWIDTH_16_BYTES = 16, 360534a7298SLaurent Pinchart DMA_SLAVE_BUSWIDTH_32_BYTES = 32, 361534a7298SLaurent Pinchart DMA_SLAVE_BUSWIDTH_64_BYTES = 64, 362c156d0a5SLinus Walleij }; 363c156d0a5SLinus Walleij 364c156d0a5SLinus Walleij /** 365c156d0a5SLinus Walleij * struct dma_slave_config - dma slave channel runtime config 366c156d0a5SLinus Walleij * @direction: whether the data shall go in or out on this slave 367397321f4SAlexander Popov * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are 368d9ff958bSLaurent Pinchart * legal values. DEPRECATED, drivers should use the direction argument 369d9ff958bSLaurent Pinchart * to the device_prep_slave_sg and device_prep_dma_cyclic functions or 370d9ff958bSLaurent Pinchart * the dir field in the dma_interleaved_template structure. 371c156d0a5SLinus Walleij * @src_addr: this is the physical address where DMA slave data 372c156d0a5SLinus Walleij * should be read (RX), if the source is memory this argument is 373c156d0a5SLinus Walleij * ignored. 374c156d0a5SLinus Walleij * @dst_addr: this is the physical address where DMA slave data 375c156d0a5SLinus Walleij * should be written (TX), if the source is memory this argument 376c156d0a5SLinus Walleij * is ignored. 377c156d0a5SLinus Walleij * @src_addr_width: this is the width in bytes of the source (RX) 378c156d0a5SLinus Walleij * register where DMA data shall be read. If the source 379c156d0a5SLinus Walleij * is memory this may be ignored depending on architecture. 3803f7632e1SStefan Brüns * Legal values: 1, 2, 3, 4, 8, 16, 32, 64. 381c156d0a5SLinus Walleij * @dst_addr_width: same as src_addr_width but for destination 382c156d0a5SLinus Walleij * target (TX) mutatis mutandis. 383c156d0a5SLinus Walleij * @src_maxburst: the maximum number of words (note: words, as in 384c156d0a5SLinus Walleij * units of the src_addr_width member, not bytes) that can be sent 385c156d0a5SLinus Walleij * in one burst to the device. Typically something like half the 386c156d0a5SLinus Walleij * FIFO depth on I/O peripherals so you don't overflow it. 
This
387c156d0a5SLinus Walleij * may or may not be applicable on memory sources.
388c156d0a5SLinus Walleij * @dst_maxburst: same as src_maxburst but for destination target
389c156d0a5SLinus Walleij * mutatis mutandis.
39054cd2558SPeter Ujfalusi * @src_port_window_size: The length of the register area in words through which
39154cd2558SPeter Ujfalusi * the data needs to be accessed on the device side. It is only used for devices
39254cd2558SPeter Ujfalusi * which use an area instead of a single register to receive the data. Typically
39354cd2558SPeter Ujfalusi * the DMA loops over this area in order to transfer the data.
39454cd2558SPeter Ujfalusi * @dst_port_window_size: same as src_port_window_size but for the destination
39554cd2558SPeter Ujfalusi * port.
396dcc043dcSViresh Kumar * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
397dcc043dcSViresh Kumar * with 'true' if the peripheral should be the flow controller. Direction will
398dcc043dcSViresh Kumar * be selected at runtime.
3994fd1e324SLaxman Dewangan * @slave_id: Slave requester id. Only valid for slave channels. The dma
4004fd1e324SLaxman Dewangan * slave peripheral will have a unique id as dma requester which needs to be
4014fd1e324SLaxman Dewangan * passed as part of the slave config.
402c156d0a5SLinus Walleij *
403c156d0a5SLinus Walleij * This struct is passed in as configuration data to a DMA engine
404c156d0a5SLinus Walleij * in order to set up a certain channel for DMA transport at runtime.
405c156d0a5SLinus Walleij * The DMA device/engine has to provide support for an additional
4062c44ad91SMaxime Ripard * callback in the dma_device structure, device_config, and this struct
4072c44ad91SMaxime Ripard * will then be passed in as an argument to the function.
408c156d0a5SLinus Walleij *
4097cbccb55SLars-Peter Clausen * The rationale for adding configuration information to this struct is as
4107cbccb55SLars-Peter Clausen * follows: if it is likely that more than one DMA slave controller in
4117cbccb55SLars-Peter Clausen * the world will support the configuration option, then make it generic.
4127cbccb55SLars-Peter Clausen * If not: if it is fixed so that it can be supplied statically from the
4137cbccb55SLars-Peter Clausen * platform data, then prefer to do that.
414c156d0a5SLinus Walleij */
415c156d0a5SLinus Walleij struct dma_slave_config {
41649920bc6SVinod Koul enum dma_transfer_direction direction;
41795756320SVinod Koul phys_addr_t src_addr;
41895756320SVinod Koul phys_addr_t dst_addr;
419c156d0a5SLinus Walleij enum dma_slave_buswidth src_addr_width;
420c156d0a5SLinus Walleij enum dma_slave_buswidth dst_addr_width;
421c156d0a5SLinus Walleij u32 src_maxburst;
422c156d0a5SLinus Walleij u32 dst_maxburst;
42354cd2558SPeter Ujfalusi u32 src_port_window_size;
42454cd2558SPeter Ujfalusi u32 dst_port_window_size;
425dcc043dcSViresh Kumar bool device_fc;
4264fd1e324SLaxman Dewangan unsigned int slave_id;
427c156d0a5SLinus Walleij };
428c156d0a5SLinus Walleij 
42950720563SLars-Peter Clausen /**
43050720563SLars-Peter Clausen * enum dma_residue_granularity - Granularity of the reported transfer residue
43150720563SLars-Peter Clausen * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
43250720563SLars-Peter Clausen * DMA channel is only able to tell whether a descriptor has been completed or
43350720563SLars-Peter Clausen * not, which means residue reporting is not supported by this channel. The
43450720563SLars-Peter Clausen * residue field of the dma_tx_state struct will always be 0.
43550720563SLars-Peter Clausen * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
43650720563SLars-Peter Clausen * completed segment of the transfer (for cyclic transfers this is after each
43750720563SLars-Peter Clausen * period). This is typically implemented by having the hardware generate an
43850720563SLars-Peter Clausen * interrupt after each transferred segment and then the driver updates the
43950720563SLars-Peter Clausen * outstanding residue by the size of the segment. Another possibility is if
44050720563SLars-Peter Clausen * the hardware supports scatter-gather and the segment descriptor has a field
44150720563SLars-Peter Clausen * which gets set after the segment has been completed. The driver then counts
44250720563SLars-Peter Clausen * the number of segments without the flag set to compute the residue.
44350720563SLars-Peter Clausen * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
44450720563SLars-Peter Clausen * burst. This is typically only supported if the hardware has a progress
44550720563SLars-Peter Clausen * register of some sort (e.g. a register with the current read/write address
44650720563SLars-Peter Clausen * or a register with the amount of bursts/beats/bytes that have been
44750720563SLars-Peter Clausen * transferred or still need to be transferred).
44850720563SLars-Peter Clausen */
44950720563SLars-Peter Clausen enum dma_residue_granularity {
45050720563SLars-Peter Clausen DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
45150720563SLars-Peter Clausen DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
45250720563SLars-Peter Clausen DMA_RESIDUE_GRANULARITY_BURST = 2,
45350720563SLars-Peter Clausen };
45450720563SLars-Peter Clausen 
455c2cbd427SStefan Brüns /**
456c2cbd427SStefan Brüns * struct dma_slave_caps - expose capabilities of a slave channel only
457c2cbd427SStefan Brüns * @src_addr_widths: bit mask of src addr widths the channel supports.
458c2cbd427SStefan Brüns * Width is specified in bytes, e.g. for a channel supporting
459c2cbd427SStefan Brüns * a width of 4 the mask should have BIT(4) set.
460c2cbd427SStefan Brüns * @dst_addr_widths: bit mask of dst addr widths the channel supports
461c2cbd427SStefan Brüns * @directions: bit mask of slave directions the channel supports.
462c2cbd427SStefan Brüns * Since the enum dma_transfer_direction is not defined as a bit flag for
463c2cbd427SStefan Brüns * each type, the dma controller should set BIT(<TYPE>) and the same
464221a27c7SVinod Koul * should be checked by the controller as well
4656d5bbed3SShawn Lin * @max_burst: max burst capability per-transfer
466d8095f94SMarek Szyprowski * @cmd_pause: true, if pause is supported (i.e.
for reading residue or 467d8095f94SMarek Szyprowski * for resume later) 468d8095f94SMarek Szyprowski * @cmd_resume: true, if resume is supported 469221a27c7SVinod Koul * @cmd_terminate: true, if terminate cmd is supported 47050720563SLars-Peter Clausen * @residue_granularity: granularity of the reported transfer residue 47127242021SVinod Koul * @descriptor_reuse: if a descriptor can be reused by client and 47227242021SVinod Koul * resubmitted multiple times 473221a27c7SVinod Koul */ 474221a27c7SVinod Koul struct dma_slave_caps { 475221a27c7SVinod Koul u32 src_addr_widths; 476ceacbdbfSMaxime Ripard u32 dst_addr_widths; 477221a27c7SVinod Koul u32 directions; 4786d5bbed3SShawn Lin u32 max_burst; 479221a27c7SVinod Koul bool cmd_pause; 480d8095f94SMarek Szyprowski bool cmd_resume; 481221a27c7SVinod Koul bool cmd_terminate; 48250720563SLars-Peter Clausen enum dma_residue_granularity residue_granularity; 48327242021SVinod Koul bool descriptor_reuse; 484221a27c7SVinod Koul }; 485221a27c7SVinod Koul 48641d5e59cSDan Williams static inline const char *dma_chan_name(struct dma_chan *chan) 48741d5e59cSDan Williams { 48841d5e59cSDan Williams return dev_name(&chan->dev->device); 48941d5e59cSDan Williams } 490d379b01eSDan Williams 491c13c8260SChris Leech void dma_chan_cleanup(struct kref *kref); 492c13c8260SChris Leech 493c13c8260SChris Leech /** 49459b5ec21SDan Williams * typedef dma_filter_fn - callback filter for dma_request_channel 49559b5ec21SDan Williams * @chan: channel to be reviewed 49659b5ec21SDan Williams * @filter_param: opaque parameter passed through dma_request_channel 49759b5ec21SDan Williams * 49859b5ec21SDan Williams * When this optional parameter is specified in a call to dma_request_channel a 49959b5ec21SDan Williams * suitable channel is passed to this routine for further dispositioning before 50059b5ec21SDan Williams * being returned. Where 'suitable' indicates a non-busy channel that 5017dd60251SDan Williams * satisfies the given capability mask. It returns 'true' to indicate that the 5027dd60251SDan Williams * channel is suitable. 
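 *
 * A minimal filter sketch (matching on chan_id is purely illustrative; real
 * filters typically compare driver-private data passed in via @filter_param):
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		int wanted_id = *(int *)filter_param;
 *
 *		return chan->chan_id == wanted_id;
 *	}
 *
 * Such a filter would then be passed to
 * dma_request_channel(mask, my_filter, &wanted_id).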
50359b5ec21SDan Williams */ 5047dd60251SDan Williams typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); 50559b5ec21SDan Williams 5067405f74bSDan Williams typedef void (*dma_async_tx_callback)(void *dma_async_param); 507d38a8c62SDan Williams 508f067025bSDave Jiang enum dmaengine_tx_result { 509f067025bSDave Jiang DMA_TRANS_NOERROR = 0, /* SUCCESS */ 510f067025bSDave Jiang DMA_TRANS_READ_FAILED, /* Source DMA read failed */ 511f067025bSDave Jiang DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */ 512f067025bSDave Jiang DMA_TRANS_ABORTED, /* Op never submitted / aborted */ 513f067025bSDave Jiang }; 514f067025bSDave Jiang 515f067025bSDave Jiang struct dmaengine_result { 516f067025bSDave Jiang enum dmaengine_tx_result result; 517f067025bSDave Jiang u32 residue; 518f067025bSDave Jiang }; 519f067025bSDave Jiang 520f067025bSDave Jiang typedef void (*dma_async_tx_callback_result)(void *dma_async_param, 521f067025bSDave Jiang const struct dmaengine_result *result); 522f067025bSDave Jiang 523d38a8c62SDan Williams struct dmaengine_unmap_data { 5240c0eb4caSZi Yan #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) 5250c0eb4caSZi Yan u16 map_cnt; 5260c0eb4caSZi Yan #else 527c1f43dd9SXuelin Shi u8 map_cnt; 5280c0eb4caSZi Yan #endif 529d38a8c62SDan Williams u8 to_cnt; 530d38a8c62SDan Williams u8 from_cnt; 531d38a8c62SDan Williams u8 bidi_cnt; 532d38a8c62SDan Williams struct device *dev; 533d38a8c62SDan Williams struct kref kref; 534d38a8c62SDan Williams size_t len; 535d38a8c62SDan Williams dma_addr_t addr[0]; 536d38a8c62SDan Williams }; 537d38a8c62SDan Williams 5384db8fd32SPeter Ujfalusi struct dma_async_tx_descriptor; 5394db8fd32SPeter Ujfalusi 5404db8fd32SPeter Ujfalusi struct dma_descriptor_metadata_ops { 5414db8fd32SPeter Ujfalusi int (*attach)(struct dma_async_tx_descriptor *desc, void *data, 5424db8fd32SPeter Ujfalusi size_t len); 5434db8fd32SPeter Ujfalusi 5444db8fd32SPeter Ujfalusi void *(*get_ptr)(struct dma_async_tx_descriptor *desc, 5454db8fd32SPeter Ujfalusi size_t *payload_len, size_t *max_len); 5464db8fd32SPeter Ujfalusi int (*set_len)(struct dma_async_tx_descriptor *desc, 5474db8fd32SPeter Ujfalusi size_t payload_len); 5484db8fd32SPeter Ujfalusi }; 5494db8fd32SPeter Ujfalusi 5507405f74bSDan Williams /** 5517405f74bSDan Williams * struct dma_async_tx_descriptor - async transaction descriptor 5527405f74bSDan Williams * ---dma generic offload fields--- 5537405f74bSDan Williams * @cookie: tracking cookie for this transaction, set to -EBUSY if 5547405f74bSDan Williams * this tx is sitting on a dependency list 555636bdeaaSDan Williams * @flags: flags to augment operation preparation, control completion, and 556636bdeaaSDan Williams * communicate status 5577405f74bSDan Williams * @phys: physical address of the descriptor 5587405f74bSDan Williams * @chan: target channel for this operation 559aba96badSVinod Koul * @tx_submit: accept the descriptor, assign ordered cookie and mark the 560aba96badSVinod Koul * descriptor pending. To be pushed on .issue_pending() call 5617405f74bSDan Williams * @callback: routine to call after this operation is complete 5627405f74bSDan Williams * @callback_param: general parameter to pass to the callback routine 5634db8fd32SPeter Ujfalusi * @desc_metadata_mode: core managed metadata mode to protect mixed use of 5644db8fd32SPeter Ujfalusi * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. 
Otherwise 5654db8fd32SPeter Ujfalusi * DESC_METADATA_NONE 5664db8fd32SPeter Ujfalusi * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the 5674db8fd32SPeter Ujfalusi * DMA driver if metadata mode is supported with the descriptor 5687405f74bSDan Williams * ---async_tx api specific fields--- 56919242d72SDan Williams * @next: at completion submit this descriptor 5707405f74bSDan Williams * @parent: pointer to the next level up in the dependency chain 57119242d72SDan Williams * @lock: protect the parent and next pointers 5727405f74bSDan Williams */ 5737405f74bSDan Williams struct dma_async_tx_descriptor { 5747405f74bSDan Williams dma_cookie_t cookie; 575636bdeaaSDan Williams enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ 5767405f74bSDan Williams dma_addr_t phys; 5777405f74bSDan Williams struct dma_chan *chan; 5787405f74bSDan Williams dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 57927242021SVinod Koul int (*desc_free)(struct dma_async_tx_descriptor *tx); 5807405f74bSDan Williams dma_async_tx_callback callback; 581f067025bSDave Jiang dma_async_tx_callback_result callback_result; 5827405f74bSDan Williams void *callback_param; 583d38a8c62SDan Williams struct dmaengine_unmap_data *unmap; 5844db8fd32SPeter Ujfalusi enum dma_desc_metadata_mode desc_metadata_mode; 5854db8fd32SPeter Ujfalusi struct dma_descriptor_metadata_ops *metadata_ops; 5865fc6d897SDan Williams #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 58719242d72SDan Williams struct dma_async_tx_descriptor *next; 5887405f74bSDan Williams struct dma_async_tx_descriptor *parent; 5897405f74bSDan Williams spinlock_t lock; 590caa20d97SDan Williams #endif 5917405f74bSDan Williams }; 5927405f74bSDan Williams 59389716462SDan Williams #ifdef CONFIG_DMA_ENGINE 594d38a8c62SDan Williams static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, 595d38a8c62SDan Williams struct dmaengine_unmap_data *unmap) 596d38a8c62SDan Williams { 597d38a8c62SDan Williams kref_get(&unmap->kref); 598d38a8c62SDan Williams tx->unmap = unmap; 599d38a8c62SDan Williams } 600d38a8c62SDan Williams 60189716462SDan Williams struct dmaengine_unmap_data * 60289716462SDan Williams dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); 60345c463aeSDan Williams void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); 60489716462SDan Williams #else 60589716462SDan Williams static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, 60689716462SDan Williams struct dmaengine_unmap_data *unmap) 60789716462SDan Williams { 60889716462SDan Williams } 60989716462SDan Williams static inline struct dmaengine_unmap_data * 61089716462SDan Williams dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) 61189716462SDan Williams { 61289716462SDan Williams return NULL; 61389716462SDan Williams } 61489716462SDan Williams static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) 61589716462SDan Williams { 61689716462SDan Williams } 61789716462SDan Williams #endif 61845c463aeSDan Williams 619d38a8c62SDan Williams static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) 620d38a8c62SDan Williams { 6213a92063bSAndy Shevchenko if (!tx->unmap) 6223a92063bSAndy Shevchenko return; 6233a92063bSAndy Shevchenko 62445c463aeSDan Williams dmaengine_unmap_put(tx->unmap); 625d38a8c62SDan Williams tx->unmap = NULL; 626d38a8c62SDan Williams } 627d38a8c62SDan Williams 6285fc6d897SDan Williams #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 629caa20d97SDan Williams static inline void 
txd_lock(struct dma_async_tx_descriptor *txd) 630caa20d97SDan Williams { 631caa20d97SDan Williams } 632caa20d97SDan Williams static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 633caa20d97SDan Williams { 634caa20d97SDan Williams } 635caa20d97SDan Williams static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 636caa20d97SDan Williams { 637caa20d97SDan Williams BUG(); 638caa20d97SDan Williams } 639caa20d97SDan Williams static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 640caa20d97SDan Williams { 641caa20d97SDan Williams } 642caa20d97SDan Williams static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 643caa20d97SDan Williams { 644caa20d97SDan Williams } 645caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 646caa20d97SDan Williams { 647caa20d97SDan Williams return NULL; 648caa20d97SDan Williams } 649caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 650caa20d97SDan Williams { 651caa20d97SDan Williams return NULL; 652caa20d97SDan Williams } 653caa20d97SDan Williams 654caa20d97SDan Williams #else 655caa20d97SDan Williams static inline void txd_lock(struct dma_async_tx_descriptor *txd) 656caa20d97SDan Williams { 657caa20d97SDan Williams spin_lock_bh(&txd->lock); 658caa20d97SDan Williams } 659caa20d97SDan Williams static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 660caa20d97SDan Williams { 661caa20d97SDan Williams spin_unlock_bh(&txd->lock); 662caa20d97SDan Williams } 663caa20d97SDan Williams static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 664caa20d97SDan Williams { 665caa20d97SDan Williams txd->next = next; 666caa20d97SDan Williams next->parent = txd; 667caa20d97SDan Williams } 668caa20d97SDan Williams static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 669caa20d97SDan Williams { 670caa20d97SDan Williams txd->parent = NULL; 671caa20d97SDan Williams } 672caa20d97SDan Williams static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 673caa20d97SDan Williams { 674caa20d97SDan Williams txd->next = NULL; 675caa20d97SDan Williams } 676caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 677caa20d97SDan Williams { 678caa20d97SDan Williams return txd->parent; 679caa20d97SDan Williams } 680caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 681caa20d97SDan Williams { 682caa20d97SDan Williams return txd->next; 683caa20d97SDan Williams } 684caa20d97SDan Williams #endif 685caa20d97SDan Williams 686c13c8260SChris Leech /** 68707934481SLinus Walleij * struct dma_tx_state - filled in to report the status of 68807934481SLinus Walleij * a transfer. 68907934481SLinus Walleij * @last: last completed DMA cookie 69007934481SLinus Walleij * @used: last issued DMA cookie (i.e. the one in progress) 69107934481SLinus Walleij * @residue: the remaining number of bytes left to transmit 69207934481SLinus Walleij * on the selected transfer for states DMA_IN_PROGRESS and 69307934481SLinus Walleij * DMA_PAUSED if this is implemented in the driver, else 0 6946755ec06SPeter Ujfalusi * @in_flight_bytes: amount of data in bytes cached by the DMA. 
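 *
 * A sketch of how a client might poll this state (assumes @chan and @cookie
 * come from an earlier dmaengine_submit() on that channel):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("%u bytes left\n", state.residue);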
69507934481SLinus Walleij */ 69607934481SLinus Walleij struct dma_tx_state { 69707934481SLinus Walleij dma_cookie_t last; 69807934481SLinus Walleij dma_cookie_t used; 69907934481SLinus Walleij u32 residue; 7006755ec06SPeter Ujfalusi u32 in_flight_bytes; 70107934481SLinus Walleij }; 70207934481SLinus Walleij 70307934481SLinus Walleij /** 70477a68e56SMaxime Ripard * enum dmaengine_alignment - defines alignment of the DMA async tx 70577a68e56SMaxime Ripard * buffers 70677a68e56SMaxime Ripard */ 70777a68e56SMaxime Ripard enum dmaengine_alignment { 70877a68e56SMaxime Ripard DMAENGINE_ALIGN_1_BYTE = 0, 70977a68e56SMaxime Ripard DMAENGINE_ALIGN_2_BYTES = 1, 71077a68e56SMaxime Ripard DMAENGINE_ALIGN_4_BYTES = 2, 71177a68e56SMaxime Ripard DMAENGINE_ALIGN_8_BYTES = 3, 71277a68e56SMaxime Ripard DMAENGINE_ALIGN_16_BYTES = 4, 71377a68e56SMaxime Ripard DMAENGINE_ALIGN_32_BYTES = 5, 71477a68e56SMaxime Ripard DMAENGINE_ALIGN_64_BYTES = 6, 71577a68e56SMaxime Ripard }; 71677a68e56SMaxime Ripard 71777a68e56SMaxime Ripard /** 718a8135d0dSPeter Ujfalusi * struct dma_slave_map - associates slave device and it's slave channel with 719a8135d0dSPeter Ujfalusi * parameter to be used by a filter function 720a8135d0dSPeter Ujfalusi * @devname: name of the device 721a8135d0dSPeter Ujfalusi * @slave: slave channel name 722a8135d0dSPeter Ujfalusi * @param: opaque parameter to pass to struct dma_filter.fn 723a8135d0dSPeter Ujfalusi */ 724a8135d0dSPeter Ujfalusi struct dma_slave_map { 725a8135d0dSPeter Ujfalusi const char *devname; 726a8135d0dSPeter Ujfalusi const char *slave; 727a8135d0dSPeter Ujfalusi void *param; 728a8135d0dSPeter Ujfalusi }; 729a8135d0dSPeter Ujfalusi 730a8135d0dSPeter Ujfalusi /** 731a8135d0dSPeter Ujfalusi * struct dma_filter - information for slave device/channel to filter_fn/param 732a8135d0dSPeter Ujfalusi * mapping 733a8135d0dSPeter Ujfalusi * @fn: filter function callback 734a8135d0dSPeter Ujfalusi * @mapcnt: number of slave device/channel in the map 735a8135d0dSPeter Ujfalusi * @map: array of channel to filter mapping data 736a8135d0dSPeter Ujfalusi */ 737a8135d0dSPeter Ujfalusi struct dma_filter { 738a8135d0dSPeter Ujfalusi dma_filter_fn fn; 739a8135d0dSPeter Ujfalusi int mapcnt; 740a8135d0dSPeter Ujfalusi const struct dma_slave_map *map; 741a8135d0dSPeter Ujfalusi }; 742a8135d0dSPeter Ujfalusi 743a8135d0dSPeter Ujfalusi /** 744c13c8260SChris Leech * struct dma_device - info on the entity supplying DMA services 745c13c8260SChris Leech * @chancnt: how many DMA channels are supported 7460f571515SAtsushi Nemoto * @privatecnt: how many DMA channels are requested by dma_request_channel 747c13c8260SChris Leech * @channels: the list of struct dma_chan 748c13c8260SChris Leech * @global_node: list_head for global dma_device_list 749a8135d0dSPeter Ujfalusi * @filter: information for device/slave to filter function/param mapping 7507405f74bSDan Williams * @cap_mask: one or more dma_capability flags 7514db8fd32SPeter Ujfalusi * @desc_metadata_modes: supported metadata modes by the DMA device 7527405f74bSDan Williams * @max_xor: maximum number of xor sources, 0 if no capability 753b2f46fd8SDan Williams * @max_pq: maximum number of PQ sources and PQ-continue capability 75483544ae9SDan Williams * @copy_align: alignment shift for memcpy operations 75583544ae9SDan Williams * @xor_align: alignment shift for xor operations 75683544ae9SDan Williams * @pq_align: alignment shift for pq operations 7574983a501SMaxime Ripard * @fill_align: alignment shift for memset operations 758fe4ada2dSRandy Dunlap * @dev_id: 
unique device ID
7597405f74bSDan Williams * @dev: struct device reference for dma mapping api
760dae7a589SLogan Gunthorpe * @owner: owner module (automatically set based on the provided dev)
761cb8cea51SMaxime Ripard * @src_addr_widths: bit mask of src addr widths the device supports
762c2cbd427SStefan Brüns * Width is specified in bytes, e.g. for a device supporting
763c2cbd427SStefan Brüns * a width of 4 the mask should have BIT(4) set.
764cb8cea51SMaxime Ripard * @dst_addr_widths: bit mask of dst addr widths the device supports
765c2cbd427SStefan Brüns * @directions: bit mask of slave directions the device supports.
766c2cbd427SStefan Brüns * Since the enum dma_transfer_direction is not defined as a bit flag for
767c2cbd427SStefan Brüns * each type, the dma controller should set BIT(<TYPE>) and the same
768c2cbd427SStefan Brüns * should be checked by the controller as well
7696d5bbed3SShawn Lin * @max_burst: max burst capability per-transfer
770cb8cea51SMaxime Ripard * @residue_granularity: granularity of the transfer residue reported
771cb8cea51SMaxime Ripard * by tx_status
772fe4ada2dSRandy Dunlap * @device_alloc_chan_resources: allocate resources and return the
773fe4ada2dSRandy Dunlap * number of allocated descriptors
774fe4ada2dSRandy Dunlap * @device_free_chan_resources: release DMA channel's resources
7757405f74bSDan Williams * @device_prep_dma_memcpy: prepares a memcpy operation
7767405f74bSDan Williams * @device_prep_dma_xor: prepares a xor operation
777099f53cbSDan Williams * @device_prep_dma_xor_val: prepares a xor validation operation
778b2f46fd8SDan Williams * @device_prep_dma_pq: prepares a pq operation
779b2f46fd8SDan Williams * @device_prep_dma_pq_val: prepares a pqzero_sum operation
7804983a501SMaxime Ripard * @device_prep_dma_memset: prepares a memset operation
78150c7cd2bSMaxime Ripard * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
7827405f74bSDan Williams * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
783dc0ee643SHaavard Skinnemoen * @device_prep_slave_sg: prepares a slave dma operation
784782bc950SSascha Hauer * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
785782bc950SSascha Hauer * The function takes a buffer of size buf_len. The callback function will
786782bc950SSascha Hauer * be called after period_len bytes have been transferred.
787b14dab79SJassi Brar * @device_prep_interleaved_dma: prepares a transfer expressed in a generic way
788ff39988aSSiva Yerramreddy * @device_prep_dma_imm_data: DMAs 8 bytes of immediate data to the dst address
78994a73e30SMaxime Ripard * @device_config: Pushes a new configuration to a channel, returns 0 or an
79094a73e30SMaxime Ripard * error code
79123a3ea2fSMaxime Ripard * @device_pause: Pauses any transfer happening on a channel. Returns
79223a3ea2fSMaxime Ripard * 0 or an error code
79323a3ea2fSMaxime Ripard * @device_resume: Resumes any transfer on a channel previously
79423a3ea2fSMaxime Ripard * paused. Returns 0 or an error code
7957fa0cf46SMaxime Ripard * @device_terminate_all: Aborts all transfers on a channel. Returns 0
7967fa0cf46SMaxime Ripard * or an error code
797b36f09c3SLars-Peter Clausen * @device_synchronize: Synchronizes the termination of transfers to the
798b36f09c3SLars-Peter Clausen * current context.
79907934481SLinus Walleij * @device_tx_status: poll for transaction completion, the optional
80007934481SLinus Walleij * txstate parameter can be supplied with a pointer to get a
80125985edcSLucas De Marchi * struct with auxiliary transfer status information, otherwise the call
80207934481SLinus Walleij * will just return a simple status code
8037405f74bSDan Williams * @device_issue_pending: push pending transactions to hardware
8049eeacd3aSRobert Jarzmik * @descriptor_reuse: a submitted transfer can be resubmitted after completion
8058ad342a8SLogan Gunthorpe * @device_release: called sometime after dma_async_device_unregister() is
8068ad342a8SLogan Gunthorpe * called and there are no further references to this structure. This
8078ad342a8SLogan Gunthorpe * must be implemented to free resources; however, many existing drivers
8088ad342a8SLogan Gunthorpe * do not and are therefore not safe to unbind while in use.
8098ad342a8SLogan Gunthorpe *
810c13c8260SChris Leech */
811c13c8260SChris Leech struct dma_device {
8128ad342a8SLogan Gunthorpe struct kref ref;
813c13c8260SChris Leech unsigned int chancnt;
8140f571515SAtsushi Nemoto unsigned int privatecnt;
815c13c8260SChris Leech struct list_head channels;
816c13c8260SChris Leech struct list_head global_node;
817a8135d0dSPeter Ujfalusi struct dma_filter filter;
8187405f74bSDan Williams dma_cap_mask_t cap_mask;
8194db8fd32SPeter Ujfalusi enum dma_desc_metadata_mode desc_metadata_modes;
820b2f46fd8SDan Williams unsigned short max_xor;
821b2f46fd8SDan Williams unsigned short max_pq;
82277a68e56SMaxime Ripard enum dmaengine_alignment copy_align;
82377a68e56SMaxime Ripard enum dmaengine_alignment xor_align;
82477a68e56SMaxime Ripard enum dmaengine_alignment pq_align;
82577a68e56SMaxime Ripard enum dmaengine_alignment fill_align;
826b2f46fd8SDan Williams #define DMA_HAS_PQ_CONTINUE (1 << 15)
827c13c8260SChris Leech 
828c13c8260SChris Leech int dev_id;
8297405f74bSDan Williams struct device *dev;
830dae7a589SLogan Gunthorpe struct module *owner;
831c13c8260SChris Leech 
832cb8cea51SMaxime Ripard u32 src_addr_widths;
833cb8cea51SMaxime Ripard u32 dst_addr_widths;
834cb8cea51SMaxime Ripard u32 directions;
8356d5bbed3SShawn Lin u32 max_burst;
8369eeacd3aSRobert Jarzmik bool descriptor_reuse;
837cb8cea51SMaxime Ripard enum dma_residue_granularity residue_granularity;
838cb8cea51SMaxime Ripard 
839aa1e6f1aSDan Williams int (*device_alloc_chan_resources)(struct dma_chan *chan);
840c13c8260SChris Leech void (*device_free_chan_resources)(struct dma_chan *chan);
8417405f74bSDan Williams 
8427405f74bSDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
843ceacbdbfSMaxime Ripard struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
844d4c56f97SDan Williams size_t len, unsigned long flags);
8457405f74bSDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
846ceacbdbfSMaxime Ripard struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
847d4c56f97SDan Williams unsigned int src_cnt, size_t len, unsigned long flags);
848099f53cbSDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
8490036731cSDan Williams struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
850ad283ea4SDan Williams size_t len, enum sum_check_flags *result, unsigned long flags);
851b2f46fd8SDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
852b2f46fd8SDan Williams struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
853b2f46fd8SDan Williams unsigned int src_cnt, const unsigned char *scf,
854b2f46fd8SDan Williams size_t
len, unsigned long flags); 855b2f46fd8SDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( 856b2f46fd8SDan Williams struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 857b2f46fd8SDan Williams unsigned int src_cnt, const unsigned char *scf, size_t len, 858b2f46fd8SDan Williams enum sum_check_flags *pqres, unsigned long flags); 8594983a501SMaxime Ripard struct dma_async_tx_descriptor *(*device_prep_dma_memset)( 8604983a501SMaxime Ripard struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 8614983a501SMaxime Ripard unsigned long flags); 86250c7cd2bSMaxime Ripard struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)( 86350c7cd2bSMaxime Ripard struct dma_chan *chan, struct scatterlist *sg, 86450c7cd2bSMaxime Ripard unsigned int nents, int value, unsigned long flags); 8657405f74bSDan Williams struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 866636bdeaaSDan Williams struct dma_chan *chan, unsigned long flags); 8677405f74bSDan Williams 868dc0ee643SHaavard Skinnemoen struct dma_async_tx_descriptor *(*device_prep_slave_sg)( 869dc0ee643SHaavard Skinnemoen struct dma_chan *chan, struct scatterlist *sgl, 87049920bc6SVinod Koul unsigned int sg_len, enum dma_transfer_direction direction, 871185ecb5fSAlexandre Bounine unsigned long flags, void *context); 872782bc950SSascha Hauer struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( 873782bc950SSascha Hauer struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 874185ecb5fSAlexandre Bounine size_t period_len, enum dma_transfer_direction direction, 87531c1e5a1SLaurent Pinchart unsigned long flags); 876b14dab79SJassi Brar struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( 877b14dab79SJassi Brar struct dma_chan *chan, struct dma_interleaved_template *xt, 878b14dab79SJassi Brar unsigned long flags); 879ff39988aSSiva Yerramreddy struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( 880ff39988aSSiva Yerramreddy struct dma_chan *chan, dma_addr_t dst, u64 data, 881ff39988aSSiva Yerramreddy unsigned long flags); 88294a73e30SMaxime Ripard 88394a73e30SMaxime Ripard int (*device_config)(struct dma_chan *chan, 88494a73e30SMaxime Ripard struct dma_slave_config *config); 88523a3ea2fSMaxime Ripard int (*device_pause)(struct dma_chan *chan); 88623a3ea2fSMaxime Ripard int (*device_resume)(struct dma_chan *chan); 8877fa0cf46SMaxime Ripard int (*device_terminate_all)(struct dma_chan *chan); 888b36f09c3SLars-Peter Clausen void (*device_synchronize)(struct dma_chan *chan); 889dc0ee643SHaavard Skinnemoen 89007934481SLinus Walleij enum dma_status (*device_tx_status)(struct dma_chan *chan, 89107934481SLinus Walleij dma_cookie_t cookie, 89207934481SLinus Walleij struct dma_tx_state *txstate); 8937405f74bSDan Williams void (*device_issue_pending)(struct dma_chan *chan); 8948ad342a8SLogan Gunthorpe void (*device_release)(struct dma_device *dev); 895c13c8260SChris Leech }; 896c13c8260SChris Leech 8976e3ecaf0SSascha Hauer static inline int dmaengine_slave_config(struct dma_chan *chan, 8986e3ecaf0SSascha Hauer struct dma_slave_config *config) 8996e3ecaf0SSascha Hauer { 90094a73e30SMaxime Ripard if (chan->device->device_config) 90194a73e30SMaxime Ripard return chan->device->device_config(chan, config); 90294a73e30SMaxime Ripard 9032c44ad91SMaxime Ripard return -ENOSYS; 9046e3ecaf0SSascha Hauer } 9056e3ecaf0SSascha Hauer 90661cc13a5SAndy Shevchenko static inline bool is_slave_direction(enum dma_transfer_direction direction) 90761cc13a5SAndy Shevchenko { 90861cc13a5SAndy Shevchenko return (direction == 
DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); 90961cc13a5SAndy Shevchenko } 91061cc13a5SAndy Shevchenko 91190b44f8fSVinod Koul static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( 912922ee08bSKuninori Morimoto struct dma_chan *chan, dma_addr_t buf, size_t len, 91349920bc6SVinod Koul enum dma_transfer_direction dir, unsigned long flags) 91490b44f8fSVinod Koul { 91590b44f8fSVinod Koul struct scatterlist sg; 916922ee08bSKuninori Morimoto sg_init_table(&sg, 1); 917922ee08bSKuninori Morimoto sg_dma_address(&sg) = buf; 918922ee08bSKuninori Morimoto sg_dma_len(&sg) = len; 91990b44f8fSVinod Koul 920757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_slave_sg) 921757d12e5SVinod Koul return NULL; 922757d12e5SVinod Koul 923185ecb5fSAlexandre Bounine return chan->device->device_prep_slave_sg(chan, &sg, 1, 924185ecb5fSAlexandre Bounine dir, flags, NULL); 92590b44f8fSVinod Koul } 92690b44f8fSVinod Koul 92716052827SAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( 92816052827SAlexandre Bounine struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 92916052827SAlexandre Bounine enum dma_transfer_direction dir, unsigned long flags) 93016052827SAlexandre Bounine { 931757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_slave_sg) 932757d12e5SVinod Koul return NULL; 933757d12e5SVinod Koul 93416052827SAlexandre Bounine return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 935185ecb5fSAlexandre Bounine dir, flags, NULL); 93616052827SAlexandre Bounine } 93716052827SAlexandre Bounine 938e42d98ebSAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE 939e42d98ebSAlexandre Bounine struct rio_dma_ext; 940e42d98ebSAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( 941e42d98ebSAlexandre Bounine struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 942e42d98ebSAlexandre Bounine enum dma_transfer_direction dir, unsigned long flags, 943e42d98ebSAlexandre Bounine struct rio_dma_ext *rio_ext) 944e42d98ebSAlexandre Bounine { 945757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_slave_sg) 946757d12e5SVinod Koul return NULL; 947757d12e5SVinod Koul 948e42d98ebSAlexandre Bounine return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 949e42d98ebSAlexandre Bounine dir, flags, rio_ext); 950e42d98ebSAlexandre Bounine } 951e42d98ebSAlexandre Bounine #endif 952e42d98ebSAlexandre Bounine 95316052827SAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( 95416052827SAlexandre Bounine struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 955e7736cdeSPeter Ujfalusi size_t period_len, enum dma_transfer_direction dir, 956e7736cdeSPeter Ujfalusi unsigned long flags) 95716052827SAlexandre Bounine { 958757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) 959757d12e5SVinod Koul return NULL; 960757d12e5SVinod Koul 96116052827SAlexandre Bounine return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, 96231c1e5a1SLaurent Pinchart period_len, dir, flags); 9636e3ecaf0SSascha Hauer } 9646e3ecaf0SSascha Hauer 965a14acb4aSBarry Song static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( 966a14acb4aSBarry Song struct dma_chan *chan, struct dma_interleaved_template *xt, 967a14acb4aSBarry Song unsigned long flags) 968a14acb4aSBarry Song { 969757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) 
970757d12e5SVinod Koul return NULL; 971757d12e5SVinod Koul 972a14acb4aSBarry Song return chan->device->device_prep_interleaved_dma(chan, xt, flags); 973a14acb4aSBarry Song } 974a14acb4aSBarry Song 9754983a501SMaxime Ripard static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( 9764983a501SMaxime Ripard struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 9774983a501SMaxime Ripard unsigned long flags) 9784983a501SMaxime Ripard { 979757d12e5SVinod Koul if (!chan || !chan->device || !chan->device->device_prep_dma_memset) 9804983a501SMaxime Ripard return NULL; 9814983a501SMaxime Ripard 9824983a501SMaxime Ripard return chan->device->device_prep_dma_memset(chan, dest, value, 9834983a501SMaxime Ripard len, flags); 9844983a501SMaxime Ripard } 9854983a501SMaxime Ripard 98677d65d6fSBoris Brezillon static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( 98777d65d6fSBoris Brezillon struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 98877d65d6fSBoris Brezillon size_t len, unsigned long flags) 98977d65d6fSBoris Brezillon { 99077d65d6fSBoris Brezillon if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy) 99177d65d6fSBoris Brezillon return NULL; 99277d65d6fSBoris Brezillon 99377d65d6fSBoris Brezillon return chan->device->device_prep_dma_memcpy(chan, dest, src, 99477d65d6fSBoris Brezillon len, flags); 99577d65d6fSBoris Brezillon } 99677d65d6fSBoris Brezillon 9974db8fd32SPeter Ujfalusi static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, 9984db8fd32SPeter Ujfalusi enum dma_desc_metadata_mode mode) 9994db8fd32SPeter Ujfalusi { 10004db8fd32SPeter Ujfalusi if (!chan) 10014db8fd32SPeter Ujfalusi return false; 10024db8fd32SPeter Ujfalusi 10034db8fd32SPeter Ujfalusi return !!(chan->device->desc_metadata_modes & mode); 10044db8fd32SPeter Ujfalusi } 10054db8fd32SPeter Ujfalusi 10064db8fd32SPeter Ujfalusi #ifdef CONFIG_DMA_ENGINE 10074db8fd32SPeter Ujfalusi int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, 10084db8fd32SPeter Ujfalusi void *data, size_t len); 10094db8fd32SPeter Ujfalusi void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 10104db8fd32SPeter Ujfalusi size_t *payload_len, size_t *max_len); 10114db8fd32SPeter Ujfalusi int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, 10124db8fd32SPeter Ujfalusi size_t payload_len); 10134db8fd32SPeter Ujfalusi #else /* CONFIG_DMA_ENGINE */ 10144db8fd32SPeter Ujfalusi static inline int dmaengine_desc_attach_metadata( 10154db8fd32SPeter Ujfalusi struct dma_async_tx_descriptor *desc, void *data, size_t len) 10164db8fd32SPeter Ujfalusi { 10174db8fd32SPeter Ujfalusi return -EINVAL; 10184db8fd32SPeter Ujfalusi } 10194db8fd32SPeter Ujfalusi static inline void *dmaengine_desc_get_metadata_ptr( 10204db8fd32SPeter Ujfalusi struct dma_async_tx_descriptor *desc, size_t *payload_len, 10214db8fd32SPeter Ujfalusi size_t *max_len) 10224db8fd32SPeter Ujfalusi { 10234db8fd32SPeter Ujfalusi return NULL; 10244db8fd32SPeter Ujfalusi } 10254db8fd32SPeter Ujfalusi static inline int dmaengine_desc_set_metadata_len( 10264db8fd32SPeter Ujfalusi struct dma_async_tx_descriptor *desc, size_t payload_len) 10274db8fd32SPeter Ujfalusi { 10284db8fd32SPeter Ujfalusi return -EINVAL; 10294db8fd32SPeter Ujfalusi } 10304db8fd32SPeter Ujfalusi #endif /* CONFIG_DMA_ENGINE */ 10314db8fd32SPeter Ujfalusi 1032b36f09c3SLars-Peter Clausen /** 1033b36f09c3SLars-Peter Clausen * dmaengine_terminate_all() - Terminate all active DMA transfers 
1034b36f09c3SLars-Peter Clausen * @chan: The channel for which to terminate the transfers 1035b36f09c3SLars-Peter Clausen * 1036b36f09c3SLars-Peter Clausen * This function is DEPRECATED, use either dmaengine_terminate_sync() or 1037b36f09c3SLars-Peter Clausen * dmaengine_terminate_async() instead. 1038b36f09c3SLars-Peter Clausen */ 10396e3ecaf0SSascha Hauer static inline int dmaengine_terminate_all(struct dma_chan *chan) 10406e3ecaf0SSascha Hauer { 10417fa0cf46SMaxime Ripard if (chan->device->device_terminate_all) 10427fa0cf46SMaxime Ripard return chan->device->device_terminate_all(chan); 10437fa0cf46SMaxime Ripard 10442c44ad91SMaxime Ripard return -ENOSYS; 10456e3ecaf0SSascha Hauer } 10466e3ecaf0SSascha Hauer 1047b36f09c3SLars-Peter Clausen /** 1048b36f09c3SLars-Peter Clausen * dmaengine_terminate_async() - Terminate all active DMA transfers 1049b36f09c3SLars-Peter Clausen * @chan: The channel for which to terminate the transfers 1050b36f09c3SLars-Peter Clausen * 1051b36f09c3SLars-Peter Clausen * Calling this function will terminate all active and pending descriptors 1052b36f09c3SLars-Peter Clausen * that have previously been submitted to the channel. It is not guaranteed 1053b36f09c3SLars-Peter Clausen * though that the transfer for the active descriptor has stopped when the 1054b36f09c3SLars-Peter Clausen * function returns. Furthermore, it is possible that the complete callback of a 1055b36f09c3SLars-Peter Clausen * submitted transfer is still running when this function returns. 1056b36f09c3SLars-Peter Clausen * 1057b36f09c3SLars-Peter Clausen * dmaengine_synchronize() needs to be called before it is safe to free 1058b36f09c3SLars-Peter Clausen * any memory that is accessed by previously submitted descriptors or before 1059b36f09c3SLars-Peter Clausen * freeing any resources accessed from within the completion callback of any 1060b36f09c3SLars-Peter Clausen * previously submitted descriptors. 1061b36f09c3SLars-Peter Clausen * 1062b36f09c3SLars-Peter Clausen * This function can be called from atomic context as well as from within a 1063b36f09c3SLars-Peter Clausen * complete callback of a descriptor submitted on the same channel. 1064b36f09c3SLars-Peter Clausen * 1065b36f09c3SLars-Peter Clausen * If neither of the two conditions above applies, consider using 1066b36f09c3SLars-Peter Clausen * dmaengine_terminate_sync() instead. 1067b36f09c3SLars-Peter Clausen */ 1068b36f09c3SLars-Peter Clausen static inline int dmaengine_terminate_async(struct dma_chan *chan) 1069b36f09c3SLars-Peter Clausen { 1070b36f09c3SLars-Peter Clausen if (chan->device->device_terminate_all) 1071b36f09c3SLars-Peter Clausen return chan->device->device_terminate_all(chan); 1072b36f09c3SLars-Peter Clausen 1073b36f09c3SLars-Peter Clausen return -EINVAL; 1074b36f09c3SLars-Peter Clausen } 1075b36f09c3SLars-Peter Clausen 1076b36f09c3SLars-Peter Clausen /** 1077b36f09c3SLars-Peter Clausen * dmaengine_synchronize() - Synchronize DMA channel termination 1078b36f09c3SLars-Peter Clausen * @chan: The channel to synchronize 1079b36f09c3SLars-Peter Clausen * 1080b36f09c3SLars-Peter Clausen * Synchronizes the termination of the DMA channel to the current context. When this 1081b36f09c3SLars-Peter Clausen * function returns, it is guaranteed that all transfers for previously issued 1082b36f09c3SLars-Peter Clausen * descriptors have stopped and it is safe to free the memory associated 1083b36f09c3SLars-Peter Clausen * with them.
Furthermore it is guaranteed that all complete callback functions 1084b36f09c3SLars-Peter Clausen * for a previously submitted descriptor have finished running and it is safe to 1085b36f09c3SLars-Peter Clausen * free resources accessed from within the complete callbacks. 1086b36f09c3SLars-Peter Clausen * 1087b36f09c3SLars-Peter Clausen * The behavior of this function is undefined if dma_async_issue_pending() has 1088b36f09c3SLars-Peter Clausen * been called between dmaengine_terminate_async() and this function. 1089b36f09c3SLars-Peter Clausen * 1090b36f09c3SLars-Peter Clausen * This function must only be called from non-atomic context and must not be 1091b36f09c3SLars-Peter Clausen * called from within a complete callback of a descriptor submitted on the same 1092b36f09c3SLars-Peter Clausen * channel. 1093b36f09c3SLars-Peter Clausen */ 1094b36f09c3SLars-Peter Clausen static inline void dmaengine_synchronize(struct dma_chan *chan) 1095b36f09c3SLars-Peter Clausen { 1096b1d6ab1aSLars-Peter Clausen might_sleep(); 1097b1d6ab1aSLars-Peter Clausen 1098b36f09c3SLars-Peter Clausen if (chan->device->device_synchronize) 1099b36f09c3SLars-Peter Clausen chan->device->device_synchronize(chan); 1100b36f09c3SLars-Peter Clausen } 1101b36f09c3SLars-Peter Clausen 1102b36f09c3SLars-Peter Clausen /** 1103b36f09c3SLars-Peter Clausen * dmaengine_terminate_sync() - Terminate all active DMA transfers 1104b36f09c3SLars-Peter Clausen * @chan: The channel for which to terminate the transfers 1105b36f09c3SLars-Peter Clausen * 1106b36f09c3SLars-Peter Clausen * Calling this function will terminate all active and pending transfers 1107b36f09c3SLars-Peter Clausen * that have previously been submitted to the channel. It is similar to 1108b36f09c3SLars-Peter Clausen * dmaengine_terminate_async() but guarantees that the DMA transfer has actually 1109b36f09c3SLars-Peter Clausen * stopped and that all complete callbacks have finished running when the 1110b36f09c3SLars-Peter Clausen * function returns. 1111b36f09c3SLars-Peter Clausen * 1112b36f09c3SLars-Peter Clausen * This function must only be called from non-atomic context and must not be 1113b36f09c3SLars-Peter Clausen * called from within a complete callback of a descriptor submitted on the same 1114b36f09c3SLars-Peter Clausen * channel. 
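 *
 * A minimal teardown sketch (illustrative only; "md" is a hypothetical driver
 * structure holding the channel and a coherent buffer, and error handling is
 * omitted):
 *
 *	dmaengine_terminate_sync(md->chan);
 *	dma_free_coherent(md->dev, md->buf_len, md->buf_cpu, md->buf_dma);
 *
 * Once dmaengine_terminate_sync() has returned, no descriptor callback can
 * still be running on the channel, so the memory backing the transfers may be
 * freed safely.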
1115b36f09c3SLars-Peter Clausen */ 1116b36f09c3SLars-Peter Clausen static inline int dmaengine_terminate_sync(struct dma_chan *chan) 1117b36f09c3SLars-Peter Clausen { 1118b36f09c3SLars-Peter Clausen int ret; 1119b36f09c3SLars-Peter Clausen 1120b36f09c3SLars-Peter Clausen ret = dmaengine_terminate_async(chan); 1121b36f09c3SLars-Peter Clausen if (ret) 1122b36f09c3SLars-Peter Clausen return ret; 1123b36f09c3SLars-Peter Clausen 1124b36f09c3SLars-Peter Clausen dmaengine_synchronize(chan); 1125b36f09c3SLars-Peter Clausen 1126b36f09c3SLars-Peter Clausen return 0; 1127b36f09c3SLars-Peter Clausen } 1128b36f09c3SLars-Peter Clausen 11296e3ecaf0SSascha Hauer static inline int dmaengine_pause(struct dma_chan *chan) 11306e3ecaf0SSascha Hauer { 113123a3ea2fSMaxime Ripard if (chan->device->device_pause) 113223a3ea2fSMaxime Ripard return chan->device->device_pause(chan); 113323a3ea2fSMaxime Ripard 11342c44ad91SMaxime Ripard return -ENOSYS; 11356e3ecaf0SSascha Hauer } 11366e3ecaf0SSascha Hauer 11376e3ecaf0SSascha Hauer static inline int dmaengine_resume(struct dma_chan *chan) 11386e3ecaf0SSascha Hauer { 113923a3ea2fSMaxime Ripard if (chan->device->device_resume) 114023a3ea2fSMaxime Ripard return chan->device->device_resume(chan); 114123a3ea2fSMaxime Ripard 11422c44ad91SMaxime Ripard return -ENOSYS; 11436e3ecaf0SSascha Hauer } 11446e3ecaf0SSascha Hauer 11453052cc2cSLars-Peter Clausen static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, 11463052cc2cSLars-Peter Clausen dma_cookie_t cookie, struct dma_tx_state *state) 11473052cc2cSLars-Peter Clausen { 11483052cc2cSLars-Peter Clausen return chan->device->device_tx_status(chan, cookie, state); 11493052cc2cSLars-Peter Clausen } 11503052cc2cSLars-Peter Clausen 115198d530feSRussell King - ARM Linux static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) 11526e3ecaf0SSascha Hauer { 11536e3ecaf0SSascha Hauer return desc->tx_submit(desc); 11546e3ecaf0SSascha Hauer } 11556e3ecaf0SSascha Hauer 115677a68e56SMaxime Ripard static inline bool dmaengine_check_align(enum dmaengine_alignment align, 115777a68e56SMaxime Ripard size_t off1, size_t off2, size_t len) 115883544ae9SDan Williams { 115988ac039cSAndy Shevchenko return !(((1 << align) - 1) & (off1 | off2 | len)); 116083544ae9SDan Williams } 116183544ae9SDan Williams 116283544ae9SDan Williams static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, 116383544ae9SDan Williams size_t off2, size_t len) 116483544ae9SDan Williams { 116583544ae9SDan Williams return dmaengine_check_align(dev->copy_align, off1, off2, len); 116683544ae9SDan Williams } 116783544ae9SDan Williams 116883544ae9SDan Williams static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, 116983544ae9SDan Williams size_t off2, size_t len) 117083544ae9SDan Williams { 117183544ae9SDan Williams return dmaengine_check_align(dev->xor_align, off1, off2, len); 117283544ae9SDan Williams } 117383544ae9SDan Williams 117483544ae9SDan Williams static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, 117583544ae9SDan Williams size_t off2, size_t len) 117683544ae9SDan Williams { 117783544ae9SDan Williams return dmaengine_check_align(dev->pq_align, off1, off2, len); 117883544ae9SDan Williams } 117983544ae9SDan Williams 11804983a501SMaxime Ripard static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, 11814983a501SMaxime Ripard size_t off2, size_t len) 11824983a501SMaxime Ripard { 11834983a501SMaxime Ripard return dmaengine_check_align(dev->fill_align, off1, 
off2, len); 11844983a501SMaxime Ripard } 11854983a501SMaxime Ripard 1186b2f46fd8SDan Williams static inline void 1187b2f46fd8SDan Williams dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) 1188b2f46fd8SDan Williams { 1189b2f46fd8SDan Williams dma->max_pq = maxpq; 1190b2f46fd8SDan Williams if (has_pq_continue) 1191b2f46fd8SDan Williams dma->max_pq |= DMA_HAS_PQ_CONTINUE; 1192b2f46fd8SDan Williams } 1193b2f46fd8SDan Williams 1194b2f46fd8SDan Williams static inline bool dmaf_continue(enum dma_ctrl_flags flags) 1195b2f46fd8SDan Williams { 1196b2f46fd8SDan Williams return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; 1197b2f46fd8SDan Williams } 1198b2f46fd8SDan Williams 1199b2f46fd8SDan Williams static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) 1200b2f46fd8SDan Williams { 1201b2f46fd8SDan Williams enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; 1202b2f46fd8SDan Williams 1203b2f46fd8SDan Williams return (flags & mask) == mask; 1204b2f46fd8SDan Williams } 1205b2f46fd8SDan Williams 1206b2f46fd8SDan Williams static inline bool dma_dev_has_pq_continue(struct dma_device *dma) 1207b2f46fd8SDan Williams { 1208b2f46fd8SDan Williams return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 1209b2f46fd8SDan Williams } 1210b2f46fd8SDan Williams 1211d3f3cf85SMathieu Lacage static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) 1212b2f46fd8SDan Williams { 1213b2f46fd8SDan Williams return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 1214b2f46fd8SDan Williams } 1215b2f46fd8SDan Williams 1216b2f46fd8SDan Williams /* dma_maxpq - reduce maxpq in the face of continued operations 1217b2f46fd8SDan Williams * @dma - dma device with PQ capability 1218b2f46fd8SDan Williams * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set 1219b2f46fd8SDan Williams * 1220b2f46fd8SDan Williams * When an engine does not support native continuation we need 3 extra 1221b2f46fd8SDan Williams * source slots to reuse P and Q with the following coefficients: 1222b2f46fd8SDan Williams * 1/ {00} * P : remove P from Q', but use it as a source for P' 1223b2f46fd8SDan Williams * 2/ {01} * Q : use Q to continue Q' calculation 1224b2f46fd8SDan Williams * 3/ {00} * Q : subtract Q from P' to cancel (2) 1225b2f46fd8SDan Williams * 1226b2f46fd8SDan Williams * In the case where P is disabled we only need 1 extra source: 1227b2f46fd8SDan Williams * 1/ {01} * Q : use Q to continue Q' calculation 1228b2f46fd8SDan Williams */ 1229b2f46fd8SDan Williams static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) 1230b2f46fd8SDan Williams { 1231b2f46fd8SDan Williams if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) 1232b2f46fd8SDan Williams return dma_dev_to_maxpq(dma); 12335f77dd85SAndy Shevchenko if (dmaf_p_disabled_continue(flags)) 1234b2f46fd8SDan Williams return dma_dev_to_maxpq(dma) - 1; 12355f77dd85SAndy Shevchenko if (dmaf_continue(flags)) 1236b2f46fd8SDan Williams return dma_dev_to_maxpq(dma) - 3; 1237b2f46fd8SDan Williams BUG(); 1238b2f46fd8SDan Williams } 1239b2f46fd8SDan Williams 124087d001efSMaxime Ripard static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, 124187d001efSMaxime Ripard size_t dir_icg) 124287d001efSMaxime Ripard { 124387d001efSMaxime Ripard if (inc) { 124487d001efSMaxime Ripard if (dir_icg) 124587d001efSMaxime Ripard return dir_icg; 12465f77dd85SAndy Shevchenko if (sgl) 124787d001efSMaxime Ripard return icg; 124887d001efSMaxime Ripard } 124987d001efSMaxime Ripard 125087d001efSMaxime 
Ripard return 0; 125187d001efSMaxime Ripard } 125287d001efSMaxime Ripard 125387d001efSMaxime Ripard static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt, 125487d001efSMaxime Ripard struct data_chunk *chunk) 125587d001efSMaxime Ripard { 125687d001efSMaxime Ripard return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl, 125787d001efSMaxime Ripard chunk->icg, chunk->dst_icg); 125887d001efSMaxime Ripard } 125987d001efSMaxime Ripard 126087d001efSMaxime Ripard static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt, 126187d001efSMaxime Ripard struct data_chunk *chunk) 126287d001efSMaxime Ripard { 126387d001efSMaxime Ripard return dmaengine_get_icg(xt->src_inc, xt->src_sgl, 126487d001efSMaxime Ripard chunk->icg, chunk->src_icg); 126587d001efSMaxime Ripard } 126687d001efSMaxime Ripard 1267c13c8260SChris Leech /* --- public DMA engine API --- */ 1268c13c8260SChris Leech 1269649274d9SDan Williams #ifdef CONFIG_DMA_ENGINE 1270209b84a8SDan Williams void dmaengine_get(void); 1271209b84a8SDan Williams void dmaengine_put(void); 1272649274d9SDan Williams #else 1273649274d9SDan Williams static inline void dmaengine_get(void) 1274649274d9SDan Williams { 1275649274d9SDan Williams } 1276649274d9SDan Williams static inline void dmaengine_put(void) 1277649274d9SDan Williams { 1278649274d9SDan Williams } 1279649274d9SDan Williams #endif 1280649274d9SDan Williams 1281729b5d1bSDan Williams #ifdef CONFIG_ASYNC_TX_DMA 1282729b5d1bSDan Williams #define async_dmaengine_get() dmaengine_get() 1283729b5d1bSDan Williams #define async_dmaengine_put() dmaengine_put() 12845fc6d897SDan Williams #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 1285138f4c35SDan Williams #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) 1286138f4c35SDan Williams #else 1287729b5d1bSDan Williams #define async_dma_find_channel(type) dma_find_channel(type) 12885fc6d897SDan Williams #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ 1289729b5d1bSDan Williams #else 1290729b5d1bSDan Williams static inline void async_dmaengine_get(void) 1291729b5d1bSDan Williams { 1292729b5d1bSDan Williams } 1293729b5d1bSDan Williams static inline void async_dmaengine_put(void) 1294729b5d1bSDan Williams { 1295729b5d1bSDan Williams } 1296729b5d1bSDan Williams static inline struct dma_chan * 1297729b5d1bSDan Williams async_dma_find_channel(enum dma_transaction_type type) 1298729b5d1bSDan Williams { 1299729b5d1bSDan Williams return NULL; 1300729b5d1bSDan Williams } 1301138f4c35SDan Williams #endif /* CONFIG_ASYNC_TX_DMA */ 13027405f74bSDan Williams void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 13037405f74bSDan Williams struct dma_chan *chan); 1304c13c8260SChris Leech 13050839875eSDan Williams static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) 13067405f74bSDan Williams { 1307636bdeaaSDan Williams tx->flags |= DMA_CTRL_ACK; 1308636bdeaaSDan Williams } 1309636bdeaaSDan Williams 1310ef560682SGuennadi Liakhovetski static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) 1311ef560682SGuennadi Liakhovetski { 1312ef560682SGuennadi Liakhovetski tx->flags &= ~DMA_CTRL_ACK; 1313ef560682SGuennadi Liakhovetski } 1314ef560682SGuennadi Liakhovetski 13150839875eSDan Williams static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) 1316636bdeaaSDan Williams { 13170839875eSDan Williams return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; 1318c13c8260SChris Leech } 1319c13c8260SChris Leech 13207405f74bSDan Williams #define dma_cap_set(tx, mask) __dma_cap_set((tx), 
&(mask)) 13217405f74bSDan Williams static inline void 13227405f74bSDan Williams __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) 13237405f74bSDan Williams { 13247405f74bSDan Williams set_bit(tx_type, dstp->bits); 13257405f74bSDan Williams } 13267405f74bSDan Williams 13270f571515SAtsushi Nemoto #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) 13280f571515SAtsushi Nemoto static inline void 13290f571515SAtsushi Nemoto __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) 13300f571515SAtsushi Nemoto { 13310f571515SAtsushi Nemoto clear_bit(tx_type, dstp->bits); 13320f571515SAtsushi Nemoto } 13330f571515SAtsushi Nemoto 133433df8ca0SDan Williams #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 133533df8ca0SDan Williams static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 133633df8ca0SDan Williams { 133733df8ca0SDan Williams bitmap_zero(dstp->bits, DMA_TX_TYPE_END); 133833df8ca0SDan Williams } 133933df8ca0SDan Williams 13407405f74bSDan Williams #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) 13417405f74bSDan Williams static inline int 13427405f74bSDan Williams __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) 13437405f74bSDan Williams { 13447405f74bSDan Williams return test_bit(tx_type, srcp->bits); 13457405f74bSDan Williams } 13467405f74bSDan Williams 13477405f74bSDan Williams #define for_each_dma_cap_mask(cap, mask) \ 1348e5a087fdSAkinobu Mita for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) 13497405f74bSDan Williams 1350c13c8260SChris Leech /** 13517405f74bSDan Williams * dma_async_issue_pending - flush pending transactions to HW 1352fe4ada2dSRandy Dunlap * @chan: target DMA channel 1353c13c8260SChris Leech * 1354c13c8260SChris Leech * This allows drivers to push copies to HW in batches, 1355c13c8260SChris Leech * reducing MMIO writes where possible. 1356c13c8260SChris Leech */ 13577405f74bSDan Williams static inline void dma_async_issue_pending(struct dma_chan *chan) 1358c13c8260SChris Leech { 1359ec8670f1SDan Williams chan->device->device_issue_pending(chan); 1360c13c8260SChris Leech } 1361c13c8260SChris Leech 1362c13c8260SChris Leech /** 13637405f74bSDan Williams * dma_async_is_tx_complete - poll for transaction completion 1364c13c8260SChris Leech * @chan: DMA channel 1365c13c8260SChris Leech * @cookie: transaction identifier to check status of 1366c13c8260SChris Leech * @last: returns last completed cookie, can be NULL 1367c13c8260SChris Leech * @used: returns last issued cookie, can be NULL 1368c13c8260SChris Leech * 1369c13c8260SChris Leech * If @last and @used are passed in, upon return they reflect the driver 1370c13c8260SChris Leech * internal state and can be used with dma_async_is_complete() to check 1371c13c8260SChris Leech * the status of multiple cookies without re-checking hardware state. 
1372c13c8260SChris Leech */ 13737405f74bSDan Williams static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, 1374c13c8260SChris Leech dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) 1375c13c8260SChris Leech { 137607934481SLinus Walleij struct dma_tx_state state; 137707934481SLinus Walleij enum dma_status status; 137807934481SLinus Walleij 137907934481SLinus Walleij status = chan->device->device_tx_status(chan, cookie, &state); 138007934481SLinus Walleij if (last) 138107934481SLinus Walleij *last = state.last; 138207934481SLinus Walleij if (used) 138307934481SLinus Walleij *used = state.used; 138407934481SLinus Walleij return status; 1385c13c8260SChris Leech } 1386c13c8260SChris Leech 1387c13c8260SChris Leech /** 1388c13c8260SChris Leech * dma_async_is_complete - test a cookie against chan state 1389c13c8260SChris Leech * @cookie: transaction identifier to test status of 1390c13c8260SChris Leech * @last_complete: last known completed transaction 1391c13c8260SChris Leech * @last_used: last cookie value handed out 1392c13c8260SChris Leech * 1393e239345fSBartlomiej Zolnierkiewicz * dma_async_is_complete() is used in dma_async_is_tx_complete(); 13948a5703f8SSebastian Siewior * the test logic is separated for lightweight testing of multiple cookies. 1395c13c8260SChris Leech */ 1396c13c8260SChris Leech static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, 1397c13c8260SChris Leech dma_cookie_t last_complete, dma_cookie_t last_used) 1398c13c8260SChris Leech { 1399c13c8260SChris Leech if (last_complete <= last_used) { 1400c13c8260SChris Leech if ((cookie <= last_complete) || (cookie > last_used)) 1401adfedd9aSVinod Koul return DMA_COMPLETE; 1402c13c8260SChris Leech } else { 1403c13c8260SChris Leech if ((cookie <= last_complete) && (cookie > last_used)) 1404adfedd9aSVinod Koul return DMA_COMPLETE; 1405c13c8260SChris Leech } 1406c13c8260SChris Leech return DMA_IN_PROGRESS; 1407c13c8260SChris Leech } 1408c13c8260SChris Leech 1409bca34692SDan Williams static inline void 1410bca34692SDan Williams dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) 1411bca34692SDan Williams { 14123a92063bSAndy Shevchenko if (!st) 14133a92063bSAndy Shevchenko return; 14143a92063bSAndy Shevchenko 1415bca34692SDan Williams st->last = last; 1416bca34692SDan Williams st->used = used; 1417bca34692SDan Williams st->residue = residue; 1418bca34692SDan Williams } 1419bca34692SDan Williams 142007f2211eSDan Williams #ifdef CONFIG_DMA_ENGINE 14214a43f394SJon Mason struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); 14224a43f394SJon Mason enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); 142307f2211eSDan Williams enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 1424c50331e8SDan Williams void dma_issue_pending_all(void); 1425a53e28daSLars-Peter Clausen struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1426f5151311SBaolin Wang dma_filter_fn fn, void *fn_param, 1427f5151311SBaolin Wang struct device_node *np); 1428bef29ec5SMarkus Pargmann struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 1429a8135d0dSPeter Ujfalusi 1430a8135d0dSPeter Ujfalusi struct dma_chan *dma_request_chan(struct device *dev, const char *name); 1431a8135d0dSPeter Ujfalusi struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); 1432a8135d0dSPeter Ujfalusi 14338f33d527SGuennadi Liakhovetski void dma_release_channel(struct dma_chan *chan);
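/*
 * Illustrative slave DMA flow, not part of the dmaengine API proper. It shows
 * how the channel request, prepare, submit and issue steps above fit together;
 * "dev", "buf_dma", "buf_len" and the "rx" request name are hypothetical and
 * the error paths are shortened. A real driver would normally also configure
 * the channel with dmaengine_slave_config() before preparing transfers.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc) {
 *		dma_release_channel(chan);
 *		return -EINVAL;
 *	}
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie)) {
 *		dma_release_channel(chan);
 *		return -EIO;
 *	}
 *
 *	dma_async_issue_pending(chan);
 *
 * The channel is handed back with dma_release_channel() once no further
 * transfers are needed, typically after dmaengine_terminate_sync().
 */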
1434fdb8df99SLaurent Pinchart int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); 143507f2211eSDan Williams #else 14364a43f394SJon Mason static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 14374a43f394SJon Mason { 14384a43f394SJon Mason return NULL; 14394a43f394SJon Mason } 14404a43f394SJon Mason static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 14414a43f394SJon Mason { 1442adfedd9aSVinod Koul return DMA_COMPLETE; 14434a43f394SJon Mason } 144407f2211eSDan Williams static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 144507f2211eSDan Williams { 1446adfedd9aSVinod Koul return DMA_COMPLETE; 144707f2211eSDan Williams } 1448c50331e8SDan Williams static inline void dma_issue_pending_all(void) 1449c50331e8SDan Williams { 14508f33d527SGuennadi Liakhovetski } 1451a53e28daSLars-Peter Clausen static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1452f5151311SBaolin Wang dma_filter_fn fn, 1453f5151311SBaolin Wang void *fn_param, 1454f5151311SBaolin Wang struct device_node *np) 14558f33d527SGuennadi Liakhovetski { 14568f33d527SGuennadi Liakhovetski return NULL; 14578f33d527SGuennadi Liakhovetski } 14589a6cecc8SJon Hunter static inline struct dma_chan *dma_request_slave_channel(struct device *dev, 1459bef29ec5SMarkus Pargmann const char *name) 14609a6cecc8SJon Hunter { 1461d18d5f59SVinod Koul return NULL; 14629a6cecc8SJon Hunter } 1463a8135d0dSPeter Ujfalusi static inline struct dma_chan *dma_request_chan(struct device *dev, 1464a8135d0dSPeter Ujfalusi const char *name) 1465a8135d0dSPeter Ujfalusi { 1466a8135d0dSPeter Ujfalusi return ERR_PTR(-ENODEV); 1467a8135d0dSPeter Ujfalusi } 1468a8135d0dSPeter Ujfalusi static inline struct dma_chan *dma_request_chan_by_mask( 1469a8135d0dSPeter Ujfalusi const dma_cap_mask_t *mask) 1470a8135d0dSPeter Ujfalusi { 1471a8135d0dSPeter Ujfalusi return ERR_PTR(-ENODEV); 1472a8135d0dSPeter Ujfalusi } 14738f33d527SGuennadi Liakhovetski static inline void dma_release_channel(struct dma_chan *chan) 14748f33d527SGuennadi Liakhovetski { 1475c50331e8SDan Williams } 1476fdb8df99SLaurent Pinchart static inline int dma_get_slave_caps(struct dma_chan *chan, 1477fdb8df99SLaurent Pinchart struct dma_slave_caps *caps) 1478fdb8df99SLaurent Pinchart { 1479fdb8df99SLaurent Pinchart return -ENXIO; 1480fdb8df99SLaurent Pinchart } 148107f2211eSDan Williams #endif 1482c13c8260SChris Leech 1483a8135d0dSPeter Ujfalusi #define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) 1484a8135d0dSPeter Ujfalusi 148527242021SVinod Koul static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) 148627242021SVinod Koul { 148727242021SVinod Koul struct dma_slave_caps caps; 148853a256a9SLukas Wunner int ret; 148927242021SVinod Koul 149053a256a9SLukas Wunner ret = dma_get_slave_caps(tx->chan, &caps); 149153a256a9SLukas Wunner if (ret) 149253a256a9SLukas Wunner return ret; 149327242021SVinod Koul 14943a92063bSAndy Shevchenko if (!caps.descriptor_reuse) 14953a92063bSAndy Shevchenko return -EPERM; 14963a92063bSAndy Shevchenko 149727242021SVinod Koul tx->flags |= DMA_CTRL_REUSE; 149827242021SVinod Koul return 0; 149927242021SVinod Koul } 150027242021SVinod Koul 150127242021SVinod Koul static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) 150227242021SVinod Koul { 150327242021SVinod Koul tx->flags &= ~DMA_CTRL_REUSE; 150427242021SVinod Koul } 150527242021SVinod Koul 150627242021SVinod Koul 
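/*
 * Descriptor reuse sketch (illustrative only; "chan", "buf_dma" and "buf_len"
 * are hypothetical, and the controller must report descriptor_reuse in its
 * slave capabilities for dmaengine_desc_set_reuse() to succeed):
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc && !dmaengine_desc_set_reuse(desc)) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * A reusable descriptor may be resubmitted after it completes instead of being
 * prepared again; when it is no longer needed it has to be returned with
 * dmaengine_desc_free(), which only succeeds for reusable descriptors.
 */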
static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) 150727242021SVinod Koul { 150827242021SVinod Koul return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE; 150927242021SVinod Koul } 151027242021SVinod Koul 151127242021SVinod Koul static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) 151227242021SVinod Koul { 151327242021SVinod Koul /* this is supported for reusable desc, so check that */ 15143a92063bSAndy Shevchenko if (!dmaengine_desc_test_reuse(desc)) 151527242021SVinod Koul return -EPERM; 15163a92063bSAndy Shevchenko 15173a92063bSAndy Shevchenko return desc->desc_free(desc); 151827242021SVinod Koul } 151927242021SVinod Koul 1520c13c8260SChris Leech /* --- DMA device --- */ 1521c13c8260SChris Leech 1522c13c8260SChris Leech int dma_async_device_register(struct dma_device *device); 1523f39b948dSHuang Shijie int dmaenginem_async_device_register(struct dma_device *device); 1524c13c8260SChris Leech void dma_async_device_unregister(struct dma_device *device); 1525e81274cdSDave Jiang int dma_async_device_channel_register(struct dma_device *device, 1526e81274cdSDave Jiang struct dma_chan *chan); 1527e81274cdSDave Jiang void dma_async_device_channel_unregister(struct dma_device *device, 1528e81274cdSDave Jiang struct dma_chan *chan); 152907f2211eSDan Williams void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 1530f5151311SBaolin Wang #define dma_request_channel(mask, x, y) \ 1531f5151311SBaolin Wang __dma_request_channel(&(mask), x, y, NULL) 1532864ef69bSMatt Porter 1533864ef69bSMatt Porter static inline struct dma_chan 153471ca5b78SGeert Uytterhoeven *dma_request_slave_channel_compat(const dma_cap_mask_t mask, 1535a53e28daSLars-Peter Clausen dma_filter_fn fn, void *fn_param, 15361dc04288SJarkko Nikula struct device *dev, const char *name) 1537864ef69bSMatt Porter { 1538864ef69bSMatt Porter struct dma_chan *chan; 1539864ef69bSMatt Porter 1540864ef69bSMatt Porter chan = dma_request_slave_channel(dev, name); 1541864ef69bSMatt Porter if (chan) 1542864ef69bSMatt Porter return chan; 1543864ef69bSMatt Porter 15447dfffb95SGeert Uytterhoeven if (!fn || !fn_param) 15457dfffb95SGeert Uytterhoeven return NULL; 15467dfffb95SGeert Uytterhoeven 154771ca5b78SGeert Uytterhoeven return __dma_request_channel(&mask, fn, fn_param, NULL); 1548864ef69bSMatt Porter } 1549816ebf48SPeter Ujfalusi 1550816ebf48SPeter Ujfalusi static inline const char * 1551816ebf48SPeter Ujfalusi dmaengine_get_direction_text(enum dma_transfer_direction dir) 1552816ebf48SPeter Ujfalusi { 1553816ebf48SPeter Ujfalusi switch (dir) { 1554816ebf48SPeter Ujfalusi case DMA_DEV_TO_MEM: 1555816ebf48SPeter Ujfalusi return "DEV_TO_MEM"; 1556816ebf48SPeter Ujfalusi case DMA_MEM_TO_DEV: 1557816ebf48SPeter Ujfalusi return "MEM_TO_DEV"; 1558816ebf48SPeter Ujfalusi case DMA_MEM_TO_MEM: 1559816ebf48SPeter Ujfalusi return "MEM_TO_MEM"; 1560816ebf48SPeter Ujfalusi case DMA_DEV_TO_DEV: 1561816ebf48SPeter Ujfalusi return "DEV_TO_DEV"; 1562816ebf48SPeter Ujfalusi default: 1563816ebf48SPeter Ujfalusi return "invalid"; 1564de5506e1SChris Leech } 1565*1873300aSAndy Shevchenko } 1566c13c8260SChris Leech #endif /* LINUX_DMAENGINE_H */