xref: /linux-6.15/include/linux/dmaengine.h (revision b9014a10)
19ab65affSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
2c13c8260SChris Leech /*
3c13c8260SChris Leech  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4c13c8260SChris Leech  */
5d2ebfb33SRussell King - ARM Linux #ifndef LINUX_DMAENGINE_H
6d2ebfb33SRussell King - ARM Linux #define LINUX_DMAENGINE_H
71c0f16e5SDavid Woodhouse 
8c13c8260SChris Leech #include <linux/device.h>
90ad7c000SStephen Warren #include <linux/err.h>
10c13c8260SChris Leech #include <linux/uio.h>
11187f1882SPaul Gortmaker #include <linux/bug.h>
1290b44f8fSVinod Koul #include <linux/scatterlist.h>
13a8efa9d6SPaul Gortmaker #include <linux/bitmap.h>
14dcc043dcSViresh Kumar #include <linux/types.h>
15a8efa9d6SPaul Gortmaker #include <asm/page.h>
16b7f080cfSAlexey Dobriyan 
17c13c8260SChris Leech /**
18fe4ada2dSRandy Dunlap  * typedef dma_cookie_t - an opaque DMA cookie
19c13c8260SChris Leech  *
20c13c8260SChris Leech  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
21c13c8260SChris Leech  */
22c13c8260SChris Leech typedef s32 dma_cookie_t;
2376bd061fSSteven J. Magnani #define DMA_MIN_COOKIE	1
24c13c8260SChris Leech 
2571ea1483SDan Carpenter static inline int dma_submit_error(dma_cookie_t cookie)
2671ea1483SDan Carpenter {
2771ea1483SDan Carpenter 	return cookie < 0 ? cookie : 0;
2871ea1483SDan Carpenter }
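
/*
 * A minimal usage sketch, assuming 'desc' was returned by one of the
 * dmaengine_prep_*() helpers on a channel 'chan' obtained earlier from
 * dma_request_chan(); error handling is trimmed to the cookie check:
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */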
29c13c8260SChris Leech 
30c13c8260SChris Leech /**
31c13c8260SChris Leech  * enum dma_status - DMA transaction status
32adfedd9aSVinod Koul  * @DMA_COMPLETE: transaction completed
33c13c8260SChris Leech  * @DMA_IN_PROGRESS: transaction not yet processed
3407934481SLinus Walleij  * @DMA_PAUSED: transaction is paused
35c13c8260SChris Leech  * @DMA_ERROR: transaction failed
36c13c8260SChris Leech  */
37c13c8260SChris Leech enum dma_status {
387db5f727SVinod Koul 	DMA_COMPLETE,
39c13c8260SChris Leech 	DMA_IN_PROGRESS,
4007934481SLinus Walleij 	DMA_PAUSED,
41c13c8260SChris Leech 	DMA_ERROR,
4247ec7f09SDave Jiang 	DMA_OUT_OF_ORDER,
43c13c8260SChris Leech };
44c13c8260SChris Leech 
45c13c8260SChris Leech /**
467405f74bSDan Williams  * enum dma_transaction_type - DMA transaction types/indexes
47138f4c35SDan Williams  *
48138f4c35SDan Williams  * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
49138f4c35SDan Williams  * automatically set as dma devices are registered.
507405f74bSDan Williams  */
517405f74bSDan Williams enum dma_transaction_type {
527405f74bSDan Williams 	DMA_MEMCPY,
537405f74bSDan Williams 	DMA_XOR,
54b2f46fd8SDan Williams 	DMA_PQ,
55099f53cbSDan Williams 	DMA_XOR_VAL,
56099f53cbSDan Williams 	DMA_PQ_VAL,
574983a501SMaxime Ripard 	DMA_MEMSET,
5850c7cd2bSMaxime Ripard 	DMA_MEMSET_SG,
597405f74bSDan Williams 	DMA_INTERRUPT,
6059b5ec21SDan Williams 	DMA_PRIVATE,
61138f4c35SDan Williams 	DMA_ASYNC_TX,
62dc0ee643SHaavard Skinnemoen 	DMA_SLAVE,
63782bc950SSascha Hauer 	DMA_CYCLIC,
64b14dab79SJassi Brar 	DMA_INTERLEAVE,
6547ec7f09SDave Jiang 	DMA_COMPLETION_NO_ORDER,
669c8ebd8bSLaurent Pinchart 	DMA_REPEAT,
679c8ebd8bSLaurent Pinchart 	DMA_LOAD_EOT,
687405f74bSDan Williams /* last transaction type for creation of the capabilities mask */
69b14dab79SJassi Brar 	DMA_TX_TYPE_END,
70b14dab79SJassi Brar };
71dc0ee643SHaavard Skinnemoen 
7249920bc6SVinod Koul /**
7349920bc6SVinod Koul  * enum dma_transfer_direction - dma transfer mode and direction indicator
7449920bc6SVinod Koul  * @DMA_MEM_TO_MEM: Async/Memcpy mode
7549920bc6SVinod Koul  * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
7649920bc6SVinod Koul  * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
7749920bc6SVinod Koul  * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
7849920bc6SVinod Koul  */
7949920bc6SVinod Koul enum dma_transfer_direction {
8049920bc6SVinod Koul 	DMA_MEM_TO_MEM,
8149920bc6SVinod Koul 	DMA_MEM_TO_DEV,
8249920bc6SVinod Koul 	DMA_DEV_TO_MEM,
8349920bc6SVinod Koul 	DMA_DEV_TO_DEV,
8462268ce9SShawn Guo 	DMA_TRANS_NONE,
8549920bc6SVinod Koul };
867405f74bSDan Williams 
87790fb995SRandy Dunlap /*
88b14dab79SJassi Brar  * Interleaved Transfer Request
89b14dab79SJassi Brar  * ----------------------------
9020d60f63SMaciej Grochowski  * A chunk is a collection of contiguous bytes to be transferred.
91b14dab79SJassi Brar  * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
92b14dab79SJassi Brar  * ICGs may or may not change between chunks.
93b14dab79SJassi Brar  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
94b14dab79SJassi Brar  *  that when repeated an integral number of times, specifies the transfer.
95b14dab79SJassi Brar  * A transfer template is a specification of a Frame, the number of times
96b14dab79SJassi Brar  *  it is to be repeated and other per-transfer attributes.
97b14dab79SJassi Brar  *
98b14dab79SJassi Brar  * Practically, a client driver would have ready a template for each
99b14dab79SJassi Brar  *  type of transfer it is going to need during its lifetime and
100b14dab79SJassi Brar  *  set only 'src_start' and 'dst_start' before submitting the requests.
101b14dab79SJassi Brar  *
102b14dab79SJassi Brar  *
103b14dab79SJassi Brar  *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
104b14dab79SJassi Brar  *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
105b14dab79SJassi Brar  *
106b14dab79SJassi Brar  *    ==  Chunk size
107b14dab79SJassi Brar  *    ... ICG
108b14dab79SJassi Brar  */
109b14dab79SJassi Brar 
110b14dab79SJassi Brar /**
111b14dab79SJassi Brar  * struct data_chunk - Element of scatter-gather list that makes a frame.
112b14dab79SJassi Brar  * @size: Number of bytes to read from source.
113b14dab79SJassi Brar  *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
114b14dab79SJassi Brar  * @icg: Number of bytes to jump after last src/dst address of this
115b14dab79SJassi Brar  *	 chunk and before first src/dst address for next chunk.
116b14dab79SJassi Brar  *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
117b14dab79SJassi Brar  *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
118e1031dc1SMaxime Ripard  * @dst_icg: Number of bytes to jump after last dst address of this
119e1031dc1SMaxime Ripard  *	 chunk and before the first dst address for next chunk.
120e1031dc1SMaxime Ripard  *	 Ignored if dst_inc is true and dst_sgl is false.
121e1031dc1SMaxime Ripard  * @src_icg: Number of bytes to jump after last src address of this
122e1031dc1SMaxime Ripard  *	 chunk and before the first src address for next chunk.
123e1031dc1SMaxime Ripard  *	 Ignored if src_inc is true and src_sgl is false.
124b14dab79SJassi Brar  */
125b14dab79SJassi Brar struct data_chunk {
126b14dab79SJassi Brar 	size_t size;
127b14dab79SJassi Brar 	size_t icg;
128e1031dc1SMaxime Ripard 	size_t dst_icg;
129e1031dc1SMaxime Ripard 	size_t src_icg;
130b14dab79SJassi Brar };
131b14dab79SJassi Brar 
132b14dab79SJassi Brar /**
133b14dab79SJassi Brar  * struct dma_interleaved_template - Template to convey to the DMAC the transfer pattern
134b14dab79SJassi Brar  *	 and attributes.
135b14dab79SJassi Brar  * @src_start: Bus address of source for the first chunk.
136b14dab79SJassi Brar  * @dst_start: Bus address of destination for the first chunk.
137b14dab79SJassi Brar  * @dir: Specifies the type of Source and Destination.
138b14dab79SJassi Brar  * @src_inc: If the source address increments after reading from it.
139b14dab79SJassi Brar  * @dst_inc: If the destination address increments after writing to it.
140b14dab79SJassi Brar  * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
141b14dab79SJassi Brar  *		Otherwise, source is read contiguously (icg ignored).
142b14dab79SJassi Brar  *		Ignored if src_inc is false.
143b14dab79SJassi Brar  * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
144b14dab79SJassi Brar  *		Otherwise, destination is filled contiguously (icg ignored).
145b14dab79SJassi Brar  *		Ignored if dst_inc is false.
146b14dab79SJassi Brar  * @numf: Number of frames in this template.
147b14dab79SJassi Brar  * @frame_size: Number of chunks in a frame i.e, size of sgl[].
148b14dab79SJassi Brar  * @sgl: Array of {chunk,icg} pairs that make up a frame.
149b14dab79SJassi Brar  */
150b14dab79SJassi Brar struct dma_interleaved_template {
151b14dab79SJassi Brar 	dma_addr_t src_start;
152b14dab79SJassi Brar 	dma_addr_t dst_start;
153b14dab79SJassi Brar 	enum dma_transfer_direction dir;
154b14dab79SJassi Brar 	bool src_inc;
155b14dab79SJassi Brar 	bool dst_inc;
156b14dab79SJassi Brar 	bool src_sgl;
157b14dab79SJassi Brar 	bool dst_sgl;
158b14dab79SJassi Brar 	size_t numf;
159b14dab79SJassi Brar 	size_t frame_size;
160466f966bSGustavo A. R. Silva 	struct data_chunk sgl[];
161b14dab79SJassi Brar };
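
/*
 * As a sketch, a client could describe a 2D (sub-rectangle) memory-to-memory
 * copy with the template above. The geometry below (64 lines of 512 payload
 * bytes, skipping 1024 bytes between lines on the source side) and the bus
 * addresses 'src_dma'/'dst_dma' are illustrative assumptions:
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->src_start = src_dma;
 *	xt->dst_start = dst_dma;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = true;	// honour the ICG on the source side
 *	xt->dst_sgl = false;	// write the destination contiguously
 *	xt->numf = 64;		// 64 frames, i.e. 64 lines
 *	xt->frame_size = 1;	// one chunk per frame
 *	xt->sgl[0].size = 512;
 *	xt->sgl[0].icg = 1024;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */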
162b14dab79SJassi Brar 
163b14dab79SJassi Brar /**
1645878853fSPaul Cercueil  * struct dma_vec - DMA vector
1655878853fSPaul Cercueil  * @addr: Bus address of the start of the vector
1665878853fSPaul Cercueil  * @len: Length in bytes of the DMA vector
1675878853fSPaul Cercueil  */
1685878853fSPaul Cercueil struct dma_vec {
1695878853fSPaul Cercueil 	dma_addr_t addr;
1705878853fSPaul Cercueil 	size_t len;
1715878853fSPaul Cercueil };
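
/*
 * A sketch of a two-segment device-to-memory transfer described with
 * dma_vec entries; 'chan' and the bus addresses are assumed to be set up
 * elsewhere, and the dmaengine_prep_peripheral_dma_vec() wrapper is assumed
 * to mirror the device_prep_peripheral_dma_vec() hook defined further down:
 *
 *	struct dma_vec vecs[2] = {
 *		{ .addr = buf0_dma, .len = 4096 },
 *		{ .addr = buf1_dma, .len = 4096 },
 *	};
 *
 *	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
 *						 DMA_DEV_TO_MEM,
 *						 DMA_PREP_INTERRUPT);
 */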
1725878853fSPaul Cercueil 
1735878853fSPaul Cercueil /**
174636bdeaaSDan Williams  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
175636bdeaaSDan Williams  *  control completion, and communicate status.
176d4c56f97SDan Williams  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
177d4c56f97SDan Williams  *  this transaction
178a88f6667SGuennadi Liakhovetski  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
1792be90e91SRandy Dunlap  *  acknowledges receipt, i.e. has a chance to establish any dependency
180b2f46fd8SDan Williams  *  chains
181b2f46fd8SDan Williams  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
182b2f46fd8SDan Williams  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
183b2f46fd8SDan Williams  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
184b2f46fd8SDan Williams  *  sources that were the result of a previous operation, in the case of a PQ
185b2f46fd8SDan Williams  *  operation it continues the calculation with new sources
1860403e382SDan Williams  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
1870403e382SDan Williams  *  on the result of this operation
18827242021SVinod Koul  * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
18927242021SVinod Koul  *  cleared or freed
1903e00ab4aSAbhishek Sahu  * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is command
1913e00ab4aSAbhishek Sahu  *  data and the descriptor should be in a different format from normal
1923e00ab4aSAbhishek Sahu  *  data descriptors.
1939c8ebd8bSLaurent Pinchart  * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
1949c8ebd8bSLaurent Pinchart  *  repeated when it ends until a transaction is issued on the same channel
1959c8ebd8bSLaurent Pinchart  *  with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
1969c8ebd8bSLaurent Pinchart  *  interleaved transactions and is ignored for all other transaction types.
1979c8ebd8bSLaurent Pinchart  * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
1989c8ebd8bSLaurent Pinchart  *  active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
1999c8ebd8bSLaurent Pinchart  *  repeated transaction ends. Not setting this flag when the previously queued
2009c8ebd8bSLaurent Pinchart  *  transaction is marked with DMA_PREP_REPEAT will cause the new transaction
2019c8ebd8bSLaurent Pinchart  *  to never be processed and stay in the issued queue forever. The flag is
2029c8ebd8bSLaurent Pinchart  *  ignored if the previous transaction is not a repeated transaction.
203d4c56f97SDan Williams  */
204636bdeaaSDan Williams enum dma_ctrl_flags {
205d4c56f97SDan Williams 	DMA_PREP_INTERRUPT = (1 << 0),
206636bdeaaSDan Williams 	DMA_CTRL_ACK = (1 << 1),
2070776ae7bSBartlomiej Zolnierkiewicz 	DMA_PREP_PQ_DISABLE_P = (1 << 2),
2080776ae7bSBartlomiej Zolnierkiewicz 	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
2090776ae7bSBartlomiej Zolnierkiewicz 	DMA_PREP_CONTINUE = (1 << 4),
2100776ae7bSBartlomiej Zolnierkiewicz 	DMA_PREP_FENCE = (1 << 5),
21127242021SVinod Koul 	DMA_CTRL_REUSE = (1 << 6),
2123e00ab4aSAbhishek Sahu 	DMA_PREP_CMD = (1 << 7),
2139c8ebd8bSLaurent Pinchart 	DMA_PREP_REPEAT = (1 << 8),
2149c8ebd8bSLaurent Pinchart 	DMA_PREP_LOAD_EOT = (1 << 9),
215d4c56f97SDan Williams };
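
/*
 * As a sketch, a typical slave transfer asks for a completion interrupt and
 * acknowledges the descriptor up front; 'chan', 'buf' (a dma_addr_t) and
 * 'len' are assumed to be prepared elsewhere:
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 */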
216d4c56f97SDan Williams 
217d4c56f97SDan Williams /**
218ad283ea4SDan Williams  * enum sum_check_bits - bit position of pq_check_flags
219ad283ea4SDan Williams  */
220ad283ea4SDan Williams enum sum_check_bits {
221ad283ea4SDan Williams 	SUM_CHECK_P = 0,
222ad283ea4SDan Williams 	SUM_CHECK_Q = 1,
223ad283ea4SDan Williams };
224ad283ea4SDan Williams 
225ad283ea4SDan Williams /**
226790fb995SRandy Dunlap  * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
227ad283ea4SDan Williams  * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
228ad283ea4SDan Williams  * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
229ad283ea4SDan Williams  */
230ad283ea4SDan Williams enum sum_check_flags {
231ad283ea4SDan Williams 	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
232ad283ea4SDan Williams 	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
233ad283ea4SDan Williams };
234ad283ea4SDan Williams 
235ad283ea4SDan Williams 
236ad283ea4SDan Williams /**
2377405f74bSDan Williams  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
2387405f74bSDan Williams  * See linux/cpumask.h
2397405f74bSDan Williams  */
2407405f74bSDan Williams typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
2417405f74bSDan Williams 
2427405f74bSDan Williams /**
2434db8fd32SPeter Ujfalusi  * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
2444db8fd32SPeter Ujfalusi  * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
2454db8fd32SPeter Ujfalusi  *  client driver and it is attached (via the dmaengine_desc_attach_metadata()
2464db8fd32SPeter Ujfalusi  *  helper) to the descriptor.
2474db8fd32SPeter Ujfalusi  *
2484db8fd32SPeter Ujfalusi  * Client drivers interested in using this mode can follow:
2494db8fd32SPeter Ujfalusi  * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
2504db8fd32SPeter Ujfalusi  *   1. prepare the descriptor (dmaengine_prep_*)
2514db8fd32SPeter Ujfalusi  *	construct the metadata in the client's buffer
2524db8fd32SPeter Ujfalusi  *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
2534db8fd32SPeter Ujfalusi  *	descriptor
2544db8fd32SPeter Ujfalusi  *   3. submit the transfer
2554db8fd32SPeter Ujfalusi  * - DMA_DEV_TO_MEM:
2564db8fd32SPeter Ujfalusi  *   1. prepare the descriptor (dmaengine_prep_*)
2574db8fd32SPeter Ujfalusi  *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
2584db8fd32SPeter Ujfalusi  *	descriptor
2594db8fd32SPeter Ujfalusi  *   3. submit the transfer
2604db8fd32SPeter Ujfalusi  *   4. when the transfer is completed, the metadata should be available in the
2614db8fd32SPeter Ujfalusi  *	attached buffer
2624db8fd32SPeter Ujfalusi  *
2634db8fd32SPeter Ujfalusi  * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
2644db8fd32SPeter Ujfalusi  *  driver. The client driver can ask for the pointer, maximum size and the
2654db8fd32SPeter Ujfalusi  *  currently used size of the metadata and can directly update or read it.
2664db8fd32SPeter Ujfalusi  *  dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
2674db8fd32SPeter Ujfalusi  *  provided as helper functions.
2684db8fd32SPeter Ujfalusi  *
2694db8fd32SPeter Ujfalusi  *  Note: the metadata area for the descriptor is no longer valid after the
2704db8fd32SPeter Ujfalusi  *  transfer has been completed (valid up to the point when the completion
2714db8fd32SPeter Ujfalusi  *  callback returns if used).
2724db8fd32SPeter Ujfalusi  *
2734db8fd32SPeter Ujfalusi  * Client drivers interested in using this mode can follow:
2744db8fd32SPeter Ujfalusi  * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
2754db8fd32SPeter Ujfalusi  *   1. prepare the descriptor (dmaengine_prep_*)
2764db8fd32SPeter Ujfalusi  *   2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
2774db8fd32SPeter Ujfalusi  *	metadata area
2784db8fd32SPeter Ujfalusi  *   3. update the metadata at the pointer
2794db8fd32SPeter Ujfalusi  *   4. use dmaengine_desc_set_metadata_len()  to tell the DMA engine the amount
2804db8fd32SPeter Ujfalusi  *	of data the client has placed into the metadata buffer
2814db8fd32SPeter Ujfalusi  *   5. submit the transfer
2824db8fd32SPeter Ujfalusi  * - DMA_DEV_TO_MEM:
2834db8fd32SPeter Ujfalusi  *   1. prepare the descriptor (dmaengine_prep_*)
2844db8fd32SPeter Ujfalusi  *   2. submit the transfer
2854db8fd32SPeter Ujfalusi  *   3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
2864db8fd32SPeter Ujfalusi  *	pointer to the engine's metadata area
2874db8fd32SPeter Ujfalusi  *   4. Read out the metadata from the pointer
2884db8fd32SPeter Ujfalusi  *
289790fb995SRandy Dunlap  * Warning: the two modes are not compatible and clients must use one mode for a
2904db8fd32SPeter Ujfalusi  * descriptor.
2914db8fd32SPeter Ujfalusi  */
2924db8fd32SPeter Ujfalusi enum dma_desc_metadata_mode {
2934db8fd32SPeter Ujfalusi 	DESC_METADATA_NONE = 0,
2944db8fd32SPeter Ujfalusi 	DESC_METADATA_CLIENT = BIT(0),
2954db8fd32SPeter Ujfalusi 	DESC_METADATA_ENGINE = BIT(1),
2964db8fd32SPeter Ujfalusi };
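
/*
 * A condensed DESC_METADATA_CLIENT sketch for a DMA_MEM_TO_DEV transfer,
 * following the steps above; 'chan', 'buf', 'len' and the client-owned
 * 'meta' buffer of 'meta_len' bytes are assumptions of this example:
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		return -EINVAL;
 *	ret = dmaengine_desc_attach_metadata(desc, meta, meta_len);
 *	if (ret)
 *		return ret;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */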
2974db8fd32SPeter Ujfalusi 
298acfbb191SAndy Shevchenko /**
299acfbb191SAndy Shevchenko  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
300acfbb191SAndy Shevchenko  * @memcpy_count: transaction counter
301acfbb191SAndy Shevchenko  * @bytes_transferred: byte counter
302acfbb191SAndy Shevchenko  */
303c13c8260SChris Leech struct dma_chan_percpu {
304c13c8260SChris Leech 	/* stats */
305c13c8260SChris Leech 	unsigned long memcpy_count;
306c13c8260SChris Leech 	unsigned long bytes_transferred;
307c13c8260SChris Leech };
308c13c8260SChris Leech 
309c13c8260SChris Leech /**
31056f13c0dSPeter Ujfalusi  * struct dma_router - DMA router structure
31156f13c0dSPeter Ujfalusi  * @dev: pointer to the DMA router device
31256f13c0dSPeter Ujfalusi  * @route_free: function to be called when the route can be disconnected
31356f13c0dSPeter Ujfalusi  */
31456f13c0dSPeter Ujfalusi struct dma_router {
31556f13c0dSPeter Ujfalusi 	struct device *dev;
31656f13c0dSPeter Ujfalusi 	void (*route_free)(struct device *dev, void *route_data);
31756f13c0dSPeter Ujfalusi };
31856f13c0dSPeter Ujfalusi 
31956f13c0dSPeter Ujfalusi /**
320c13c8260SChris Leech  * struct dma_chan - devices supply DMA channels, clients use them
321fe4ada2dSRandy Dunlap  * @device: ptr to the dma device who supplies this channel, always !%NULL
32271723a96SGeert Uytterhoeven  * @slave: ptr to the device using this channel
323c13c8260SChris Leech  * @cookie: last cookie value returned to client
3244d4e58deSRussell King - ARM Linux  * @completed_cookie: last completed cookie for this channel
325fe4ada2dSRandy Dunlap  * @chan_id: channel ID for sysfs
32641d5e59cSDan Williams  * @dev: class device for sysfs
32771723a96SGeert Uytterhoeven  * @name: backlink name for sysfs
328e937cc1dSPeter Ujfalusi  * @dbg_client_name: slave name for debugfs in format:
329e937cc1dSPeter Ujfalusi  *	dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
330c13c8260SChris Leech  * @device_node: used to add this to the device chan list
331c13c8260SChris Leech  * @local: per-cpu pointer to a struct dma_chan_percpu
332868d2ee2SVinod Koul  * @client_count: how many clients are using this channel
333bec08513SDan Williams  * @table_count: number of appearances in the mem-to-mem allocation table
33456f13c0dSPeter Ujfalusi  * @router: pointer to the DMA router structure
33556f13c0dSPeter Ujfalusi  * @route_data: channel specific data for the router
336287d8592SDan Williams  * @private: private data for certain client-channel associations
337c13c8260SChris Leech  */
338c13c8260SChris Leech struct dma_chan {
339c13c8260SChris Leech 	struct dma_device *device;
34071723a96SGeert Uytterhoeven 	struct device *slave;
341c13c8260SChris Leech 	dma_cookie_t cookie;
3424d4e58deSRussell King - ARM Linux 	dma_cookie_t completed_cookie;
343c13c8260SChris Leech 
344c13c8260SChris Leech 	/* sysfs */
345c13c8260SChris Leech 	int chan_id;
34641d5e59cSDan Williams 	struct dma_chan_dev *dev;
34771723a96SGeert Uytterhoeven 	const char *name;
348e937cc1dSPeter Ujfalusi #ifdef CONFIG_DEBUG_FS
349e937cc1dSPeter Ujfalusi 	char *dbg_client_name;
350e937cc1dSPeter Ujfalusi #endif
351c13c8260SChris Leech 
352c13c8260SChris Leech 	struct list_head device_node;
353a29d8b8eSTejun Heo 	struct dma_chan_percpu __percpu *local;
3547cc5bf9aSDan Williams 	int client_count;
355bec08513SDan Williams 	int table_count;
35656f13c0dSPeter Ujfalusi 
35756f13c0dSPeter Ujfalusi 	/* DMA router */
35856f13c0dSPeter Ujfalusi 	struct dma_router *router;
35956f13c0dSPeter Ujfalusi 	void *route_data;
36056f13c0dSPeter Ujfalusi 
361287d8592SDan Williams 	void *private;
362c13c8260SChris Leech };
363c13c8260SChris Leech 
36441d5e59cSDan Williams /**
36541d5e59cSDan Williams  * struct dma_chan_dev - relate sysfs device node to backing channel device
366868d2ee2SVinod Koul  * @chan: driver channel device
367868d2ee2SVinod Koul  * @device: sysfs device
368868d2ee2SVinod Koul  * @dev_id: parent dma_device dev_id
369ab650ef6SPeter Ujfalusi  * @chan_dma_dev: The channel is using custom/different dma-mapping
370ab650ef6SPeter Ujfalusi  * compared to the parent dma_device
37141d5e59cSDan Williams  */
37241d5e59cSDan Williams struct dma_chan_dev {
37341d5e59cSDan Williams 	struct dma_chan *chan;
37441d5e59cSDan Williams 	struct device device;
375864498aaSDan Williams 	int dev_id;
376ab650ef6SPeter Ujfalusi 	bool chan_dma_dev;
37741d5e59cSDan Williams };
37841d5e59cSDan Williams 
379c156d0a5SLinus Walleij /**
380ba730340SAlexander Popov  * enum dma_slave_buswidth - defines bus width of the DMA slave
381c156d0a5SLinus Walleij  * device, source or target buses
382c156d0a5SLinus Walleij  */
383c156d0a5SLinus Walleij enum dma_slave_buswidth {
384c156d0a5SLinus Walleij 	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
385c156d0a5SLinus Walleij 	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
386c156d0a5SLinus Walleij 	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
38793c6ee94SPeter Ujfalusi 	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
388c156d0a5SLinus Walleij 	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
389c156d0a5SLinus Walleij 	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
390534a7298SLaurent Pinchart 	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
391534a7298SLaurent Pinchart 	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
392534a7298SLaurent Pinchart 	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
393ab959c7dSBiju Das 	DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
394c156d0a5SLinus Walleij };
395c156d0a5SLinus Walleij 
396c156d0a5SLinus Walleij /**
397c156d0a5SLinus Walleij  * struct dma_slave_config - dma slave channel runtime config
398c156d0a5SLinus Walleij  * @direction: whether the data shall go in or out on this slave
399397321f4SAlexander Popov  * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
400d9ff958bSLaurent Pinchart  * legal values. DEPRECATED, drivers should use the direction argument
401d9ff958bSLaurent Pinchart  * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
402d9ff958bSLaurent Pinchart  * the dir field in the dma_interleaved_template structure.
403c156d0a5SLinus Walleij  * @src_addr: this is the physical address where DMA slave data
404c156d0a5SLinus Walleij  * should be read (RX), if the source is memory this argument is
405c156d0a5SLinus Walleij  * ignored.
406c156d0a5SLinus Walleij  * @dst_addr: this is the physical address where DMA slave data
40737fe4605SSerge Semin  * should be written (TX), if the destination is memory this argument
408c156d0a5SLinus Walleij  * is ignored.
409c156d0a5SLinus Walleij  * @src_addr_width: this is the width in bytes of the source (RX)
410c156d0a5SLinus Walleij  * register where DMA data shall be read. If the source
411c156d0a5SLinus Walleij  * is memory this may be ignored depending on architecture.
412ab959c7dSBiju Das  * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
413c156d0a5SLinus Walleij  * @dst_addr_width: same as src_addr_width but for destination
414c156d0a5SLinus Walleij  * target (TX) mutatis mutandis.
415c156d0a5SLinus Walleij  * @src_maxburst: the maximum number of words (note: words, as in
416c156d0a5SLinus Walleij  * units of the src_addr_width member, not bytes) that can be sent
417c156d0a5SLinus Walleij  * in one burst to the device. Typically something like half the
418c156d0a5SLinus Walleij  * FIFO depth on I/O peripherals so you don't overflow it. This
419c156d0a5SLinus Walleij  * may or may not be applicable on memory sources.
420c156d0a5SLinus Walleij  * @dst_maxburst: same as src_maxburst but for destination target
421c156d0a5SLinus Walleij  * mutatis mutandis.
42254cd2558SPeter Ujfalusi  * @src_port_window_size: The length, in words, of the register area through
42354cd2558SPeter Ujfalusi  * which the data needs to be accessed on the device side. It is only used for
42454cd2558SPeter Ujfalusi  * devices which use an area instead of a single register to receive the data.
42554cd2558SPeter Ujfalusi  * Typically the DMA loops over this area in order to transfer the data.
42654cd2558SPeter Ujfalusi  * @dst_port_window_size: same as src_port_window_size but for the destination
42754cd2558SPeter Ujfalusi  * port.
428dcc043dcSViresh Kumar  * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
429dcc043dcSViresh Kumar  * with 'true' if the peripheral should be the flow controller. Direction will
430dcc043dcSViresh Kumar  * be selected at runtime.
431e7bbb7acSVinod Koul  * @peripheral_config: peripheral configuration for programming peripheral
432e7bbb7acSVinod Koul  * for dmaengine transfer
433e7bbb7acSVinod Koul  * @peripheral_size: peripheral configuration buffer size
434c156d0a5SLinus Walleij  *
435c156d0a5SLinus Walleij  * This struct is passed in as configuration data to a DMA engine
436c156d0a5SLinus Walleij  * in order to set up a certain channel for DMA transport at runtime.
437c156d0a5SLinus Walleij  * The DMA device/engine has to provide support for an additional
4382c44ad91SMaxime Ripard  * callback in the dma_device structure, device_config and this struct
4392c44ad91SMaxime Ripard  * will then be passed in as an argument to the function.
440c156d0a5SLinus Walleij  *
4417cbccb55SLars-Peter Clausen  * The rationale for adding configuration information to this struct is as
4427cbccb55SLars-Peter Clausen  * follows: if it is likely that more than one DMA slave controller in
4437cbccb55SLars-Peter Clausen  * the world will support the configuration option, then make it generic.
4447cbccb55SLars-Peter Clausen  * If not: if it is fixed so that it can be sent statically in the platform
4457cbccb55SLars-Peter Clausen  * data, then prefer to do that.
446c156d0a5SLinus Walleij  */
447c156d0a5SLinus Walleij struct dma_slave_config {
44849920bc6SVinod Koul 	enum dma_transfer_direction direction;
44995756320SVinod Koul 	phys_addr_t src_addr;
45095756320SVinod Koul 	phys_addr_t dst_addr;
451c156d0a5SLinus Walleij 	enum dma_slave_buswidth src_addr_width;
452c156d0a5SLinus Walleij 	enum dma_slave_buswidth dst_addr_width;
453c156d0a5SLinus Walleij 	u32 src_maxburst;
454c156d0a5SLinus Walleij 	u32 dst_maxburst;
45554cd2558SPeter Ujfalusi 	u32 src_port_window_size;
45654cd2558SPeter Ujfalusi 	u32 dst_port_window_size;
457dcc043dcSViresh Kumar 	bool device_fc;
458e7bbb7acSVinod Koul 	void *peripheral_config;
459e7bbb7acSVinod Koul 	size_t peripheral_size;
460c156d0a5SLinus Walleij };
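
/*
 * A configuration sketch for memory-to-device transfers; the FIFO address,
 * bus width and burst size are illustrative values for a hypothetical
 * peripheral (the deprecated 'direction' field is left unset and the
 * direction is passed to the prep call instead):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	int ret;
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */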
461c156d0a5SLinus Walleij 
46250720563SLars-Peter Clausen /**
46350720563SLars-Peter Clausen  * enum dma_residue_granularity - Granularity of the reported transfer residue
46450720563SLars-Peter Clausen  * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
46550720563SLars-Peter Clausen  *  DMA channel is only able to tell whether a descriptor has been completed or
46650720563SLars-Peter Clausen  *  not, which means residue reporting is not supported by this channel. The
46750720563SLars-Peter Clausen  *  residue field of the dma_tx_state struct will always be 0.
46850720563SLars-Peter Clausen  * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
46950720563SLars-Peter Clausen  *  completed segment of the transfer (For cyclic transfers this is after each
47050720563SLars-Peter Clausen  *  period). This is typically implemented by having the hardware generate an
47150720563SLars-Peter Clausen  *  interrupt after each transferred segment and then the driver updates the
47250720563SLars-Peter Clausen  *  outstanding residue by the size of the segment. Another possibility is if
47350720563SLars-Peter Clausen  *  the hardware supports scatter-gather and the segment descriptor has a field
47450720563SLars-Peter Clausen  *  which gets set after the segment has been completed. The driver then counts
47550720563SLars-Peter Clausen  *  the number of segments without the flag set to compute the residue.
47650720563SLars-Peter Clausen  * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
47750720563SLars-Peter Clausen  *  burst. This is typically only supported if the hardware has a progress
47850720563SLars-Peter Clausen  *  register of some sort (E.g. a register with the current read/write address
47950720563SLars-Peter Clausen  *  or a register with the amount of bursts/beats/bytes that have been
48050720563SLars-Peter Clausen  *  transferred or still need to be transferred).
48150720563SLars-Peter Clausen  */
48250720563SLars-Peter Clausen enum dma_residue_granularity {
48350720563SLars-Peter Clausen 	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
48450720563SLars-Peter Clausen 	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
48550720563SLars-Peter Clausen 	DMA_RESIDUE_GRANULARITY_BURST = 2,
48650720563SLars-Peter Clausen };
48750720563SLars-Peter Clausen 
488c2cbd427SStefan Brüns /**
489c2cbd427SStefan Brüns  * struct dma_slave_caps - expose capabilities of a slave channel only
490c2cbd427SStefan Brüns  * @src_addr_widths: bit mask of src addr widths the channel supports.
491c2cbd427SStefan Brüns  *	Width is specified in bytes, e.g. for a channel supporting
492c2cbd427SStefan Brüns  *	a width of 4 the mask should have BIT(4) set.
493c2cbd427SStefan Brüns  * @dst_addr_widths: bit mask of dst addr widths the channel supports
494c2cbd427SStefan Brüns  * @directions: bit mask of slave directions the channel supports.
495c2cbd427SStefan Brüns  *	Since the enum dma_transfer_direction is not defined as bit flag for
496c2cbd427SStefan Brüns  *	each type, the dma controller should set BIT(<TYPE>) and the same
497221a27c7SVinod Koul  *	should be checked by the controller as well
498d97758e0SSerge Semin  * @min_burst: min burst capability per-transfer
4996d5bbed3SShawn Lin  * @max_burst: max burst capability per-transfer
500b1b40b8fSSerge Semin  * @max_sg_burst: max number of SG list entries executed in a single burst
501b1b40b8fSSerge Semin  *	DMA transaction with no software intervention for reinitialization.
502b1b40b8fSSerge Semin  *	A zero value means an unlimited number of entries.
503d8095f94SMarek Szyprowski  * @cmd_pause: true, if pause is supported (i.e. for reading residue or
504d8095f94SMarek Szyprowski  *	       for resume later)
505d8095f94SMarek Szyprowski  * @cmd_resume: true, if resume is supported
506221a27c7SVinod Koul  * @cmd_terminate: true, if terminate cmd is supported
50750720563SLars-Peter Clausen  * @residue_granularity: granularity of the reported transfer residue
50827242021SVinod Koul  * @descriptor_reuse: if a descriptor can be reused by client and
50927242021SVinod Koul  * resubmitted multiple times
510221a27c7SVinod Koul  */
511221a27c7SVinod Koul struct dma_slave_caps {
512221a27c7SVinod Koul 	u32 src_addr_widths;
513ceacbdbfSMaxime Ripard 	u32 dst_addr_widths;
514221a27c7SVinod Koul 	u32 directions;
515d97758e0SSerge Semin 	u32 min_burst;
5166d5bbed3SShawn Lin 	u32 max_burst;
517b1b40b8fSSerge Semin 	u32 max_sg_burst;
518221a27c7SVinod Koul 	bool cmd_pause;
519d8095f94SMarek Szyprowski 	bool cmd_resume;
520221a27c7SVinod Koul 	bool cmd_terminate;
52150720563SLars-Peter Clausen 	enum dma_residue_granularity residue_granularity;
52227242021SVinod Koul 	bool descriptor_reuse;
523221a27c7SVinod Koul };
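
/*
 * Capabilities can be checked before committing to a transfer layout; a
 * sketch, assuming 'chan' has already been requested:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
 *		pr_warn("channel cannot report transfer residue\n");
 */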
524221a27c7SVinod Koul 
52541d5e59cSDan Williams static inline const char *dma_chan_name(struct dma_chan *chan)
52641d5e59cSDan Williams {
52741d5e59cSDan Williams 	return dev_name(&chan->dev->device);
52841d5e59cSDan Williams }
529d379b01eSDan Williams 
530c13c8260SChris Leech /**
53159b5ec21SDan Williams  * typedef dma_filter_fn - callback filter for dma_request_channel
53259b5ec21SDan Williams  * @chan: channel to be reviewed
53359b5ec21SDan Williams  * @filter_param: opaque parameter passed through dma_request_channel
53459b5ec21SDan Williams  *
53559b5ec21SDan Williams  * When this optional parameter is specified in a call to dma_request_channel a
53659b5ec21SDan Williams  * suitable channel is passed to this routine for further dispositioning before
53759b5ec21SDan Williams  * being returned, where 'suitable' indicates a non-busy channel that
5387dd60251SDan Williams  * satisfies the given capability mask.  It returns 'true' to indicate that the
5397dd60251SDan Williams  * channel is suitable.
54059b5ec21SDan Williams  */
5417dd60251SDan Williams typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
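
/*
 * A filter sketch: the callback accepts only channels provided by a specific
 * DMA controller device, passed in as the filter parameter; requesting a
 * DMA_MEMCPY channel this way is an illustrative assumption:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dmac_dev);
 */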
54259b5ec21SDan Williams 
5437405f74bSDan Williams typedef void (*dma_async_tx_callback)(void *dma_async_param);
544d38a8c62SDan Williams 
545f067025bSDave Jiang enum dmaengine_tx_result {
546f067025bSDave Jiang 	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
547f067025bSDave Jiang 	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
548f067025bSDave Jiang 	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
549f067025bSDave Jiang 	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
550f067025bSDave Jiang };
551f067025bSDave Jiang 
552f067025bSDave Jiang struct dmaengine_result {
553f067025bSDave Jiang 	enum dmaengine_tx_result result;
554f067025bSDave Jiang 	u32 residue;
555f067025bSDave Jiang };
556f067025bSDave Jiang 
557f067025bSDave Jiang typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
558f067025bSDave Jiang 				const struct dmaengine_result *result);
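
/*
 * A completion callback sketch using the result reporting above; the
 * 'my_xfer' container and its members are assumptions of this example.
 * The callback is installed by setting desc->callback_result (and
 * desc->callback_param) before dmaengine_submit():
 *
 *	static void my_dma_done(void *param, const struct dmaengine_result *result)
 *	{
 *		struct my_xfer *xfer = param;
 *
 *		if (result->result != DMA_TRANS_NOERROR)
 *			xfer->status = -EIO;
 *		xfer->bytes_left = result->residue;
 *		complete(&xfer->done);
 *	}
 */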
559f067025bSDave Jiang 
560d38a8c62SDan Williams struct dmaengine_unmap_data {
5610c0eb4caSZi Yan #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
5620c0eb4caSZi Yan 	u16 map_cnt;
5630c0eb4caSZi Yan #else
564c1f43dd9SXuelin Shi 	u8 map_cnt;
5650c0eb4caSZi Yan #endif
566d38a8c62SDan Williams 	u8 to_cnt;
567d38a8c62SDan Williams 	u8 from_cnt;
568d38a8c62SDan Williams 	u8 bidi_cnt;
569d38a8c62SDan Williams 	struct device *dev;
570d38a8c62SDan Williams 	struct kref kref;
571d38a8c62SDan Williams 	size_t len;
572466f966bSGustavo A. R. Silva 	dma_addr_t addr[];
573d38a8c62SDan Williams };
574d38a8c62SDan Williams 
5754db8fd32SPeter Ujfalusi struct dma_async_tx_descriptor;
5764db8fd32SPeter Ujfalusi 
5774db8fd32SPeter Ujfalusi struct dma_descriptor_metadata_ops {
5784db8fd32SPeter Ujfalusi 	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
5794db8fd32SPeter Ujfalusi 		      size_t len);
5804db8fd32SPeter Ujfalusi 
5814db8fd32SPeter Ujfalusi 	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
5824db8fd32SPeter Ujfalusi 			 size_t *payload_len, size_t *max_len);
5834db8fd32SPeter Ujfalusi 	int (*set_len)(struct dma_async_tx_descriptor *desc,
5844db8fd32SPeter Ujfalusi 		       size_t payload_len);
5854db8fd32SPeter Ujfalusi };
5864db8fd32SPeter Ujfalusi 
5877405f74bSDan Williams /**
5887405f74bSDan Williams  * struct dma_async_tx_descriptor - async transaction descriptor
5897405f74bSDan Williams  * ---dma generic offload fields---
5907405f74bSDan Williams  * @cookie: tracking cookie for this transaction, set to -EBUSY if
5917405f74bSDan Williams  *	this tx is sitting on a dependency list
592636bdeaaSDan Williams  * @flags: flags to augment operation preparation, control completion, and
593636bdeaaSDan Williams  *	communicate status
5947405f74bSDan Williams  * @phys: physical address of the descriptor
5957405f74bSDan Williams  * @chan: target channel for this operation
596aba96badSVinod Koul  * @tx_submit: accept the descriptor, assign ordered cookie and mark the
599aba96badSVinod Koul  *	descriptor pending. To be pushed on .issue_pending() call
597790fb995SRandy Dunlap  * @desc_free: driver's callback function to free a reusable descriptor
598790fb995SRandy Dunlap  *	after completion
6007405f74bSDan Williams  * @callback: routine to call after this operation is complete
601790fb995SRandy Dunlap  * @callback_result: error result from a DMA transaction
6027405f74bSDan Williams  * @callback_param: general parameter to pass to the callback routine
603790fb995SRandy Dunlap  * @unmap: hook for generic DMA unmap data
6044db8fd32SPeter Ujfalusi  * @desc_metadata_mode: core managed metadata mode to protect mixed use of
6054db8fd32SPeter Ujfalusi  *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
6064db8fd32SPeter Ujfalusi  *	DESC_METADATA_NONE
6074db8fd32SPeter Ujfalusi  * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
6084db8fd32SPeter Ujfalusi  *	DMA driver if metadata mode is supported with the descriptor
6097405f74bSDan Williams  * ---async_tx api specific fields---
61019242d72SDan Williams  * @next: at completion submit this descriptor
6117405f74bSDan Williams  * @parent: pointer to the next level up in the dependency chain
61219242d72SDan Williams  * @lock: protect the parent and next pointers
6137405f74bSDan Williams  */
6147405f74bSDan Williams struct dma_async_tx_descriptor {
6157405f74bSDan Williams 	dma_cookie_t cookie;
616636bdeaaSDan Williams 	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
6177405f74bSDan Williams 	dma_addr_t phys;
6187405f74bSDan Williams 	struct dma_chan *chan;
6197405f74bSDan Williams 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
62027242021SVinod Koul 	int (*desc_free)(struct dma_async_tx_descriptor *tx);
6217405f74bSDan Williams 	dma_async_tx_callback callback;
622f067025bSDave Jiang 	dma_async_tx_callback_result callback_result;
6237405f74bSDan Williams 	void *callback_param;
624d38a8c62SDan Williams 	struct dmaengine_unmap_data *unmap;
6254db8fd32SPeter Ujfalusi 	enum dma_desc_metadata_mode desc_metadata_mode;
6264db8fd32SPeter Ujfalusi 	struct dma_descriptor_metadata_ops *metadata_ops;
6275fc6d897SDan Williams #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
62819242d72SDan Williams 	struct dma_async_tx_descriptor *next;
6297405f74bSDan Williams 	struct dma_async_tx_descriptor *parent;
6307405f74bSDan Williams 	spinlock_t lock;
631caa20d97SDan Williams #endif
6327405f74bSDan Williams };
6337405f74bSDan Williams 
63489716462SDan Williams #ifdef CONFIG_DMA_ENGINE
635d38a8c62SDan Williams static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
636d38a8c62SDan Williams 				 struct dmaengine_unmap_data *unmap)
637d38a8c62SDan Williams {
638d38a8c62SDan Williams 	kref_get(&unmap->kref);
639d38a8c62SDan Williams 	tx->unmap = unmap;
640d38a8c62SDan Williams }
641d38a8c62SDan Williams 
64289716462SDan Williams struct dmaengine_unmap_data *
64389716462SDan Williams dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
64445c463aeSDan Williams void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
64589716462SDan Williams #else
64689716462SDan Williams static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
64789716462SDan Williams 				 struct dmaengine_unmap_data *unmap)
64889716462SDan Williams {
64989716462SDan Williams }
65089716462SDan Williams static inline struct dmaengine_unmap_data *
65189716462SDan Williams dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
65289716462SDan Williams {
65389716462SDan Williams 	return NULL;
65489716462SDan Williams }
65589716462SDan Williams static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
65689716462SDan Williams {
65789716462SDan Williams }
65889716462SDan Williams #endif
65945c463aeSDan Williams 
660d38a8c62SDan Williams static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
661d38a8c62SDan Williams {
6623a92063bSAndy Shevchenko 	if (!tx->unmap)
6633a92063bSAndy Shevchenko 		return;
6643a92063bSAndy Shevchenko 
66545c463aeSDan Williams 	dmaengine_unmap_put(tx->unmap);
666d38a8c62SDan Williams 	tx->unmap = NULL;
667d38a8c62SDan Williams }
668d38a8c62SDan Williams 
6695fc6d897SDan Williams #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
670caa20d97SDan Williams static inline void txd_lock(struct dma_async_tx_descriptor *txd)
671caa20d97SDan Williams {
672caa20d97SDan Williams }
673caa20d97SDan Williams static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
674caa20d97SDan Williams {
675caa20d97SDan Williams }
676caa20d97SDan Williams static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
677caa20d97SDan Williams {
678caa20d97SDan Williams 	BUG();
679caa20d97SDan Williams }
680caa20d97SDan Williams static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
681caa20d97SDan Williams {
682caa20d97SDan Williams }
683caa20d97SDan Williams static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
684caa20d97SDan Williams {
685caa20d97SDan Williams }
686caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
687caa20d97SDan Williams {
688caa20d97SDan Williams 	return NULL;
689caa20d97SDan Williams }
690caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
691caa20d97SDan Williams {
692caa20d97SDan Williams 	return NULL;
693caa20d97SDan Williams }
694caa20d97SDan Williams 
695caa20d97SDan Williams #else
696caa20d97SDan Williams static inline void txd_lock(struct dma_async_tx_descriptor *txd)
697caa20d97SDan Williams {
698caa20d97SDan Williams 	spin_lock_bh(&txd->lock);
699caa20d97SDan Williams }
700caa20d97SDan Williams static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
701caa20d97SDan Williams {
702caa20d97SDan Williams 	spin_unlock_bh(&txd->lock);
703caa20d97SDan Williams }
704caa20d97SDan Williams static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
705caa20d97SDan Williams {
706caa20d97SDan Williams 	txd->next = next;
707caa20d97SDan Williams 	next->parent = txd;
708caa20d97SDan Williams }
709caa20d97SDan Williams static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
710caa20d97SDan Williams {
711caa20d97SDan Williams 	txd->parent = NULL;
712caa20d97SDan Williams }
713caa20d97SDan Williams static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
714caa20d97SDan Williams {
715caa20d97SDan Williams 	txd->next = NULL;
716caa20d97SDan Williams }
717caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
718caa20d97SDan Williams {
719caa20d97SDan Williams 	return txd->parent;
720caa20d97SDan Williams }
721caa20d97SDan Williams static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
722caa20d97SDan Williams {
723caa20d97SDan Williams 	return txd->next;
724caa20d97SDan Williams }
725caa20d97SDan Williams #endif
726caa20d97SDan Williams 
727c13c8260SChris Leech /**
72807934481SLinus Walleij  * struct dma_tx_state - filled in to report the status of
72907934481SLinus Walleij  * a transfer.
73007934481SLinus Walleij  * @last: last completed DMA cookie
73107934481SLinus Walleij  * @used: last issued DMA cookie (i.e. the one in progress)
73207934481SLinus Walleij  * @residue: the remaining number of bytes left to transmit
73307934481SLinus Walleij  *	on the selected transfer for states DMA_IN_PROGRESS and
73407934481SLinus Walleij  *	DMA_PAUSED if this is implemented in the driver, else 0
7356755ec06SPeter Ujfalusi  * @in_flight_bytes: amount of data in bytes cached by the DMA.
73607934481SLinus Walleij  */
73707934481SLinus Walleij struct dma_tx_state {
73807934481SLinus Walleij 	dma_cookie_t last;
73907934481SLinus Walleij 	dma_cookie_t used;
74007934481SLinus Walleij 	u32 residue;
7416755ec06SPeter Ujfalusi 	u32 in_flight_bytes;
74207934481SLinus Walleij };
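
/*
 * A polling sketch: 'cookie' is the value returned by dmaengine_submit(),
 * and the residue is only meaningful on channels whose residue_granularity
 * allows it (see struct dma_slave_caps above):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("%u bytes left\n", state.residue);
 */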
74307934481SLinus Walleij 
74407934481SLinus Walleij /**
74577a68e56SMaxime Ripard  * enum dmaengine_alignment - defines alignment of the DMA async tx
74677a68e56SMaxime Ripard  * buffers
74777a68e56SMaxime Ripard  */
74877a68e56SMaxime Ripard enum dmaengine_alignment {
74977a68e56SMaxime Ripard 	DMAENGINE_ALIGN_1_BYTE = 0,
75077a68e56SMaxime Ripard 	DMAENGINE_ALIGN_2_BYTES = 1,
75177a68e56SMaxime Ripard 	DMAENGINE_ALIGN_4_BYTES = 2,
75277a68e56SMaxime Ripard 	DMAENGINE_ALIGN_8_BYTES = 3,
75377a68e56SMaxime Ripard 	DMAENGINE_ALIGN_16_BYTES = 4,
75477a68e56SMaxime Ripard 	DMAENGINE_ALIGN_32_BYTES = 5,
75577a68e56SMaxime Ripard 	DMAENGINE_ALIGN_64_BYTES = 6,
756660343d0SPeter Ujfalusi 	DMAENGINE_ALIGN_128_BYTES = 7,
757660343d0SPeter Ujfalusi 	DMAENGINE_ALIGN_256_BYTES = 8,
75877a68e56SMaxime Ripard };
75977a68e56SMaxime Ripard 
76077a68e56SMaxime Ripard /**
761a8135d0dSPeter Ujfalusi  * struct dma_slave_map - associates a slave device and its slave channel with
762a8135d0dSPeter Ujfalusi  * a parameter to be used by a filter function
763a8135d0dSPeter Ujfalusi  * @devname: name of the device
764a8135d0dSPeter Ujfalusi  * @slave: slave channel name
765a8135d0dSPeter Ujfalusi  * @param: opaque parameter to pass to struct dma_filter.fn
766a8135d0dSPeter Ujfalusi  */
767a8135d0dSPeter Ujfalusi struct dma_slave_map {
768a8135d0dSPeter Ujfalusi 	const char *devname;
769a8135d0dSPeter Ujfalusi 	const char *slave;
770a8135d0dSPeter Ujfalusi 	void *param;
771a8135d0dSPeter Ujfalusi };
772a8135d0dSPeter Ujfalusi 
773a8135d0dSPeter Ujfalusi /**
774a8135d0dSPeter Ujfalusi  * struct dma_filter - information for slave device/channel to filter_fn/param
775a8135d0dSPeter Ujfalusi  * mapping
776a8135d0dSPeter Ujfalusi  * @fn: filter function callback
777a8135d0dSPeter Ujfalusi  * @mapcnt: number of slave device/channel in the map
778a8135d0dSPeter Ujfalusi  * @map: array of channel to filter mapping data
779a8135d0dSPeter Ujfalusi  */
780a8135d0dSPeter Ujfalusi struct dma_filter {
781a8135d0dSPeter Ujfalusi 	dma_filter_fn fn;
782a8135d0dSPeter Ujfalusi 	int mapcnt;
783a8135d0dSPeter Ujfalusi 	const struct dma_slave_map *map;
784a8135d0dSPeter Ujfalusi };
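
/*
 * A sketch of the map a DMA controller driver might provide so that
 * dma_request_chan() can resolve slave requests on non-DT/ACPI platforms;
 * the device names, channel names and FOO_DMA_PARAM() cookie are
 * hypothetical:
 *
 *	static const struct dma_slave_map foo_dma_map[] = {
 *		{ "foo-uart.0", "tx", FOO_DMA_PARAM(DMA_REQ_UART0_TX) },
 *		{ "foo-uart.0", "rx", FOO_DMA_PARAM(DMA_REQ_UART0_RX) },
 *	};
 *
 * The map, its size and the controller's filter function are assigned to
 * the dma_device's 'filter' member before dma_async_device_register().
 */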
785a8135d0dSPeter Ujfalusi 
786a8135d0dSPeter Ujfalusi /**
787c13c8260SChris Leech  * struct dma_device - info on the entity supplying DMA services
7888b544310SAndy Shevchenko  * @ref: reference is taken and put every time a channel is allocated or freed
789c13c8260SChris Leech  * @chancnt: how many DMA channels are supported
7900f571515SAtsushi Nemoto  * @privatecnt: how many DMA channels are requested by dma_request_channel
791c13c8260SChris Leech  * @channels: the list of struct dma_chan
792c13c8260SChris Leech  * @global_node: list_head for global dma_device_list
793a8135d0dSPeter Ujfalusi  * @filter: information for device/slave to filter function/param mapping
7947405f74bSDan Williams  * @cap_mask: one or more dma_capability flags
7954db8fd32SPeter Ujfalusi  * @desc_metadata_modes: supported metadata modes by the DMA device
7967405f74bSDan Williams  * @max_xor: maximum number of xor sources, 0 if no capability
797b2f46fd8SDan Williams  * @max_pq: maximum number of PQ sources and PQ-continue capability
79883544ae9SDan Williams  * @copy_align: alignment shift for memcpy operations
79983544ae9SDan Williams  * @xor_align: alignment shift for xor operations
80083544ae9SDan Williams  * @pq_align: alignment shift for pq operations
8014983a501SMaxime Ripard  * @fill_align: alignment shift for memset operations
802fe4ada2dSRandy Dunlap  * @dev_id: unique device ID
8037405f74bSDan Williams  * @dev: struct device reference for dma mapping api
804dae7a589SLogan Gunthorpe  * @owner: owner module (automatically set based on the provided dev)
8058b544310SAndy Shevchenko  * @chan_ida: unique channel ID
806cb8cea51SMaxime Ripard  * @src_addr_widths: bit mask of src addr widths the device supports.
807c2cbd427SStefan Brüns  *	Width is specified in bytes, e.g. for a device supporting
808c2cbd427SStefan Brüns  *	a width of 4 the mask should have BIT(4) set.
809cb8cea51SMaxime Ripard  * @dst_addr_widths: bit mask of dst addr widths the device supports
810c2cbd427SStefan Brüns  * @directions: bit mask of slave directions the device supports.
811c2cbd427SStefan Brüns  *	Since the enum dma_transfer_direction is not defined as bit flag for
812c2cbd427SStefan Brüns  *	each type, the dma controller should set BIT(<TYPE>) and the same
813c2cbd427SStefan Brüns  *	should be checked by the controller as well
814d97758e0SSerge Semin  * @min_burst: min burst capability per-transfer
8156d5bbed3SShawn Lin  * @max_burst: max burst capability per-transfer
816b1b40b8fSSerge Semin  * @max_sg_burst: max number of SG list entries executed in a single burst
817b1b40b8fSSerge Semin  *	DMA transaction with no software intervention for reinitialization.
818b1b40b8fSSerge Semin  *	A zero value means an unlimited number of entries.
8198b544310SAndy Shevchenko  * @descriptor_reuse: a submitted transfer can be resubmitted after completion
820cb8cea51SMaxime Ripard  * @residue_granularity: granularity of the transfer residue reported
821cb8cea51SMaxime Ripard  *	by tx_status
822fe4ada2dSRandy Dunlap  * @device_alloc_chan_resources: allocate resources and return the
823fe4ada2dSRandy Dunlap  *	number of allocated descriptors
8244f910c03SPeter Ujfalusi  * @device_router_config: optional callback for DMA router configuration
825fe4ada2dSRandy Dunlap  * @device_free_chan_resources: release DMA channel's resources
8267405f74bSDan Williams  * @device_prep_dma_memcpy: prepares a memcpy operation
8277405f74bSDan Williams  * @device_prep_dma_xor: prepares a xor operation
828099f53cbSDan Williams  * @device_prep_dma_xor_val: prepares a xor validation operation
829b2f46fd8SDan Williams  * @device_prep_dma_pq: prepares a pq operation
830b2f46fd8SDan Williams  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
8314983a501SMaxime Ripard  * @device_prep_dma_memset: prepares a memset operation
83250c7cd2bSMaxime Ripard  * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
8337405f74bSDan Williams  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
834790fb995SRandy Dunlap  * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
835790fb995SRandy Dunlap  *	where the address and size of each segment is located in one entry of
836790fb995SRandy Dunlap  *	the dma_vec array.
837dc0ee643SHaavard Skinnemoen  * @device_prep_slave_sg: prepares a slave dma operation
838782bc950SSascha Hauer  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
839782bc950SSascha Hauer  *	The function takes a buffer of size buf_len. The callback function will
840782bc950SSascha Hauer  *	be called after period_len bytes have been transferred.
841b14dab79SJassi Brar  * @device_prep_interleaved_dma: Transfer expression in a generic way.
8423b6d694eSSerge Semin  * @device_caps: May be used to override the generic DMA slave capabilities
8433b6d694eSSerge Semin  *	with per-channel specific ones
84494a73e30SMaxime Ripard  * @device_config: Pushes a new configuration to a channel, return 0 or an error
84594a73e30SMaxime Ripard  *	code
84623a3ea2fSMaxime Ripard  * @device_pause: Pauses any transfer happening on a channel. Returns
84723a3ea2fSMaxime Ripard  *	0 or an error code
84823a3ea2fSMaxime Ripard  * @device_resume: Resumes any transfer on a channel previously
84923a3ea2fSMaxime Ripard  *	paused. Returns 0 or an error code
8507fa0cf46SMaxime Ripard  * @device_terminate_all: Aborts all transfers on a channel. Returns 0
8517fa0cf46SMaxime Ripard  *	or an error code
852b36f09c3SLars-Peter Clausen  * @device_synchronize: Synchronizes the termination of a transfers to the
853b36f09c3SLars-Peter Clausen  *  current context.
85407934481SLinus Walleij  * @device_tx_status: poll for transaction completion, the optional
85507934481SLinus Walleij  *	txstate parameter can be supplied with a pointer to get a
85625985edcSLucas De Marchi  *	struct with auxiliary transfer status information, otherwise the call
85707934481SLinus Walleij  *	will just return a simple status code
8587405f74bSDan Williams  * @device_issue_pending: push pending transactions to hardware
8598ad342a8SLogan Gunthorpe  * @device_release: called sometime after dma_async_device_unregister() is
8608ad342a8SLogan Gunthorpe  *     called and there are no further references to this structure. This
8618ad342a8SLogan Gunthorpe  *     must be implemented to free resources; however, many existing drivers
8628ad342a8SLogan Gunthorpe  *     do not and are therefore not safe to unbind while in use.
863e937cc1dSPeter Ujfalusi  * @dbg_summary_show: optional routine to show contents in debugfs; default code
864e937cc1dSPeter Ujfalusi  *     will be used when this is omitted, but custom code can show extra,
865e937cc1dSPeter Ujfalusi  *     controller specific information.
8668b544310SAndy Shevchenko  * @dbg_dev_root: the root folder in debugfs for this device
867c13c8260SChris Leech  */
868c13c8260SChris Leech struct dma_device {
8698ad342a8SLogan Gunthorpe 	struct kref ref;
870c13c8260SChris Leech 	unsigned int chancnt;
8710f571515SAtsushi Nemoto 	unsigned int privatecnt;
872c13c8260SChris Leech 	struct list_head channels;
873c13c8260SChris Leech 	struct list_head global_node;
874a8135d0dSPeter Ujfalusi 	struct dma_filter filter;
8757405f74bSDan Williams 	dma_cap_mask_t cap_mask;
8764db8fd32SPeter Ujfalusi 	enum dma_desc_metadata_mode desc_metadata_modes;
877b2f46fd8SDan Williams 	unsigned short max_xor;
878b2f46fd8SDan Williams 	unsigned short max_pq;
87977a68e56SMaxime Ripard 	enum dmaengine_alignment copy_align;
88077a68e56SMaxime Ripard 	enum dmaengine_alignment xor_align;
88177a68e56SMaxime Ripard 	enum dmaengine_alignment pq_align;
88277a68e56SMaxime Ripard 	enum dmaengine_alignment fill_align;
883b2f46fd8SDan Williams 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
884c13c8260SChris Leech 
885c13c8260SChris Leech 	int dev_id;
8867405f74bSDan Williams 	struct device *dev;
887dae7a589SLogan Gunthorpe 	struct module *owner;
88808210094SDave Jiang 	struct ida chan_ida;
889c13c8260SChris Leech 
890cb8cea51SMaxime Ripard 	u32 src_addr_widths;
891cb8cea51SMaxime Ripard 	u32 dst_addr_widths;
892cb8cea51SMaxime Ripard 	u32 directions;
893d97758e0SSerge Semin 	u32 min_burst;
8946d5bbed3SShawn Lin 	u32 max_burst;
895b1b40b8fSSerge Semin 	u32 max_sg_burst;
8969eeacd3aSRobert Jarzmik 	bool descriptor_reuse;
897cb8cea51SMaxime Ripard 	enum dma_residue_granularity residue_granularity;
898cb8cea51SMaxime Ripard 
899aa1e6f1aSDan Williams 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
9004f910c03SPeter Ujfalusi 	int (*device_router_config)(struct dma_chan *chan);
901c13c8260SChris Leech 	void (*device_free_chan_resources)(struct dma_chan *chan);
9027405f74bSDan Williams 
9037405f74bSDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
904ceacbdbfSMaxime Ripard 		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
905d4c56f97SDan Williams 		size_t len, unsigned long flags);
9067405f74bSDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
907ceacbdbfSMaxime Ripard 		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
908d4c56f97SDan Williams 		unsigned int src_cnt, size_t len, unsigned long flags);
909099f53cbSDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
9100036731cSDan Williams 		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
911ad283ea4SDan Williams 		size_t len, enum sum_check_flags *result, unsigned long flags);
912b2f46fd8SDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
913b2f46fd8SDan Williams 		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
914b2f46fd8SDan Williams 		unsigned int src_cnt, const unsigned char *scf,
915b2f46fd8SDan Williams 		size_t len, unsigned long flags);
916b2f46fd8SDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
917b2f46fd8SDan Williams 		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
918b2f46fd8SDan Williams 		unsigned int src_cnt, const unsigned char *scf, size_t len,
919b2f46fd8SDan Williams 		enum sum_check_flags *pqres, unsigned long flags);
9204983a501SMaxime Ripard 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
9214983a501SMaxime Ripard 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
9224983a501SMaxime Ripard 		unsigned long flags);
92350c7cd2bSMaxime Ripard 	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
92450c7cd2bSMaxime Ripard 		struct dma_chan *chan, struct scatterlist *sg,
92550c7cd2bSMaxime Ripard 		unsigned int nents, int value, unsigned long flags);
9267405f74bSDan Williams 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
927636bdeaaSDan Williams 		struct dma_chan *chan, unsigned long flags);
9287405f74bSDan Williams 
9295878853fSPaul Cercueil 	struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
9305878853fSPaul Cercueil 		struct dma_chan *chan, const struct dma_vec *vecs,
9315878853fSPaul Cercueil 		size_t nents, enum dma_transfer_direction direction,
9325878853fSPaul Cercueil 		unsigned long flags);
933dc0ee643SHaavard Skinnemoen 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
934dc0ee643SHaavard Skinnemoen 		struct dma_chan *chan, struct scatterlist *sgl,
93549920bc6SVinod Koul 		unsigned int sg_len, enum dma_transfer_direction direction,
936185ecb5fSAlexandre Bounine 		unsigned long flags, void *context);
937782bc950SSascha Hauer 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
938782bc950SSascha Hauer 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
939185ecb5fSAlexandre Bounine 		size_t period_len, enum dma_transfer_direction direction,
94031c1e5a1SLaurent Pinchart 		unsigned long flags);
941b14dab79SJassi Brar 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
942b14dab79SJassi Brar 		struct dma_chan *chan, struct dma_interleaved_template *xt,
943b14dab79SJassi Brar 		unsigned long flags);
94494a73e30SMaxime Ripard 
9458b544310SAndy Shevchenko 	void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
9468b544310SAndy Shevchenko 	int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
94723a3ea2fSMaxime Ripard 	int (*device_pause)(struct dma_chan *chan);
94823a3ea2fSMaxime Ripard 	int (*device_resume)(struct dma_chan *chan);
9497fa0cf46SMaxime Ripard 	int (*device_terminate_all)(struct dma_chan *chan);
950b36f09c3SLars-Peter Clausen 	void (*device_synchronize)(struct dma_chan *chan);
951dc0ee643SHaavard Skinnemoen 
95207934481SLinus Walleij 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
95307934481SLinus Walleij 					    dma_cookie_t cookie,
95407934481SLinus Walleij 					    struct dma_tx_state *txstate);
9557405f74bSDan Williams 	void (*device_issue_pending)(struct dma_chan *chan);
9568ad342a8SLogan Gunthorpe 	void (*device_release)(struct dma_device *dev);
957e937cc1dSPeter Ujfalusi 	/* debugfs support */
958e937cc1dSPeter Ujfalusi 	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
95926cf132dSPeter Ujfalusi 	struct dentry *dbg_dev_root;
960c13c8260SChris Leech };
961c13c8260SChris Leech 
dmaengine_slave_config(struct dma_chan * chan,struct dma_slave_config * config)9626e3ecaf0SSascha Hauer static inline int dmaengine_slave_config(struct dma_chan *chan,
9636e3ecaf0SSascha Hauer 					  struct dma_slave_config *config)
9646e3ecaf0SSascha Hauer {
96594a73e30SMaxime Ripard 	if (chan->device->device_config)
96694a73e30SMaxime Ripard 		return chan->device->device_config(chan, config);
96794a73e30SMaxime Ripard 
9682c44ad91SMaxime Ripard 	return -ENOSYS;
9696e3ecaf0SSascha Hauer }
9706e3ecaf0SSascha Hauer 
is_slave_direction(enum dma_transfer_direction direction)97161cc13a5SAndy Shevchenko static inline bool is_slave_direction(enum dma_transfer_direction direction)
97261cc13a5SAndy Shevchenko {
973a22fe1d6SFrank Li 	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
974a22fe1d6SFrank Li 	       (direction == DMA_DEV_TO_DEV);
97561cc13a5SAndy Shevchenko }
97661cc13a5SAndy Shevchenko 
dmaengine_prep_slave_single(struct dma_chan * chan,dma_addr_t buf,size_t len,enum dma_transfer_direction dir,unsigned long flags)97790b44f8fSVinod Koul static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
978922ee08bSKuninori Morimoto 	struct dma_chan *chan, dma_addr_t buf, size_t len,
97949920bc6SVinod Koul 	enum dma_transfer_direction dir, unsigned long flags)
98090b44f8fSVinod Koul {
98190b44f8fSVinod Koul 	struct scatterlist sg;
982922ee08bSKuninori Morimoto 	sg_init_table(&sg, 1);
983922ee08bSKuninori Morimoto 	sg_dma_address(&sg) = buf;
984922ee08bSKuninori Morimoto 	sg_dma_len(&sg) = len;
98590b44f8fSVinod Koul 
986757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
987757d12e5SVinod Koul 		return NULL;
988757d12e5SVinod Koul 
989185ecb5fSAlexandre Bounine 	return chan->device->device_prep_slave_sg(chan, &sg, 1,
990185ecb5fSAlexandre Bounine 						  dir, flags, NULL);
99190b44f8fSVinod Koul }
99290b44f8fSVinod Koul 
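/*
 * Illustrative sketch (not part of this header's API): a typical single-buffer
 * slave transfer built from the helpers above. "fifo_phys", "buf_dma", "len",
 * "my_done" and "my_data" are hypothetical driver-side names; error handling
 * is abridged.
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr       = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst   = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		return -EIO;
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_done;
 *	desc->callback_param = my_data;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */
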
9935878853fSPaul Cercueil /**
9945878853fSPaul Cercueil  * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
9955878853fSPaul Cercueil  * @chan: The channel to be used for this descriptor
9965878853fSPaul Cercueil  * @vecs: The array of DMA vectors that should be transferred
9975878853fSPaul Cercueil  * @nents: The number of DMA vectors in the array
9985878853fSPaul Cercueil  * @dir: Specifies the direction of the data transfer
9995878853fSPaul Cercueil  * @flags: DMA engine flags
10005878853fSPaul Cercueil  */
dmaengine_prep_peripheral_dma_vec(struct dma_chan * chan,const struct dma_vec * vecs,size_t nents,enum dma_transfer_direction dir,unsigned long flags)10015878853fSPaul Cercueil static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
10025878853fSPaul Cercueil 	struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
10035878853fSPaul Cercueil 	enum dma_transfer_direction dir, unsigned long flags)
10045878853fSPaul Cercueil {
10055878853fSPaul Cercueil 	if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
10065878853fSPaul Cercueil 		return NULL;
10075878853fSPaul Cercueil 
10085878853fSPaul Cercueil 	return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
10095878853fSPaul Cercueil 							    dir, flags);
10105878853fSPaul Cercueil }
10115878853fSPaul Cercueil 
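/*
 * Illustrative sketch: transferring two non-contiguous, pre-mapped buffers to
 * a peripheral with a single vector descriptor. "buf0_dma"/"buf1_dma" and the
 * lengths are hypothetical; if the channel does not implement the operation
 * the helper returns NULL.
 *
 *	struct dma_vec vecs[] = {
 *		{ .addr = buf0_dma, .len = len0 },
 *		{ .addr = buf1_dma, .len = len1 },
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
 *						 DMA_MEM_TO_DEV,
 *						 DMA_PREP_INTERRUPT);
 */
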
dmaengine_prep_slave_sg(struct dma_chan * chan,struct scatterlist * sgl,unsigned int sg_len,enum dma_transfer_direction dir,unsigned long flags)101216052827SAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
101316052827SAlexandre Bounine 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
101416052827SAlexandre Bounine 	enum dma_transfer_direction dir, unsigned long flags)
101516052827SAlexandre Bounine {
1016757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
1017757d12e5SVinod Koul 		return NULL;
1018757d12e5SVinod Koul 
101916052827SAlexandre Bounine 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
1020185ecb5fSAlexandre Bounine 						  dir, flags, NULL);
102116052827SAlexandre Bounine }
102216052827SAlexandre Bounine 
1023e42d98ebSAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1024e42d98ebSAlexandre Bounine struct rio_dma_ext;
dmaengine_prep_rio_sg(struct dma_chan * chan,struct scatterlist * sgl,unsigned int sg_len,enum dma_transfer_direction dir,unsigned long flags,struct rio_dma_ext * rio_ext)1025e42d98ebSAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
1026e42d98ebSAlexandre Bounine 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
1027e42d98ebSAlexandre Bounine 	enum dma_transfer_direction dir, unsigned long flags,
1028e42d98ebSAlexandre Bounine 	struct rio_dma_ext *rio_ext)
1029e42d98ebSAlexandre Bounine {
1030757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
1031757d12e5SVinod Koul 		return NULL;
1032757d12e5SVinod Koul 
1033e42d98ebSAlexandre Bounine 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
1034e42d98ebSAlexandre Bounine 						  dir, flags, rio_ext);
1035e42d98ebSAlexandre Bounine }
1036e42d98ebSAlexandre Bounine #endif
1037e42d98ebSAlexandre Bounine 
dmaengine_prep_dma_cyclic(struct dma_chan * chan,dma_addr_t buf_addr,size_t buf_len,size_t period_len,enum dma_transfer_direction dir,unsigned long flags)103816052827SAlexandre Bounine static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
103916052827SAlexandre Bounine 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1040e7736cdeSPeter Ujfalusi 		size_t period_len, enum dma_transfer_direction dir,
1041e7736cdeSPeter Ujfalusi 		unsigned long flags)
104216052827SAlexandre Bounine {
1043757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
1044757d12e5SVinod Koul 		return NULL;
1045757d12e5SVinod Koul 
104616052827SAlexandre Bounine 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
104731c1e5a1SLaurent Pinchart 						period_len, dir, flags);
10486e3ecaf0SSascha Hauer }
10496e3ecaf0SSascha Hauer 
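/*
 * Illustrative sketch: a cyclic (ring buffer) transfer as used for audio or
 * similar streaming peripherals. The engine wraps around "ring_size" and may
 * invoke the completion callback once per "period_size"; names are
 * hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_size,
 *					 period_size, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
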
dmaengine_prep_interleaved_dma(struct dma_chan * chan,struct dma_interleaved_template * xt,unsigned long flags)1050a14acb4aSBarry Song static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
1051a14acb4aSBarry Song 		struct dma_chan *chan, struct dma_interleaved_template *xt,
1052a14acb4aSBarry Song 		unsigned long flags)
1053a14acb4aSBarry Song {
1054757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
1055757d12e5SVinod Koul 		return NULL;
10569c8ebd8bSLaurent Pinchart 	if (flags & DMA_PREP_REPEAT &&
10579c8ebd8bSLaurent Pinchart 	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
10589c8ebd8bSLaurent Pinchart 		return NULL;
1059757d12e5SVinod Koul 
1060a14acb4aSBarry Song 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
1061a14acb4aSBarry Song }
1062a14acb4aSBarry Song 
1063fc44ff0aSBen Walker /**
1064fc44ff0aSBen Walker  * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
1065fc44ff0aSBen Walker  * @chan: The channel to be used for this descriptor
1066fc44ff0aSBen Walker  * @dest: Address of buffer to be set
1067fc44ff0aSBen Walker  * @value: Treated as a single byte value that fills the destination buffer
1068fc44ff0aSBen Walker  * @len: The total size of dest
1069fc44ff0aSBen Walker  * @flags: DMA engine flags
1070fc44ff0aSBen Walker  */
dmaengine_prep_dma_memset(struct dma_chan * chan,dma_addr_t dest,int value,size_t len,unsigned long flags)10714983a501SMaxime Ripard static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
10724983a501SMaxime Ripard 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
10734983a501SMaxime Ripard 		unsigned long flags)
10744983a501SMaxime Ripard {
1075757d12e5SVinod Koul 	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
10764983a501SMaxime Ripard 		return NULL;
10774983a501SMaxime Ripard 
10784983a501SMaxime Ripard 	return chan->device->device_prep_dma_memset(chan, dest, value,
10794983a501SMaxime Ripard 						    len, flags);
10804983a501SMaxime Ripard }
10814983a501SMaxime Ripard 
dmaengine_prep_dma_memcpy(struct dma_chan * chan,dma_addr_t dest,dma_addr_t src,size_t len,unsigned long flags)108277d65d6fSBoris Brezillon static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
108377d65d6fSBoris Brezillon 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
108477d65d6fSBoris Brezillon 		size_t len, unsigned long flags)
108577d65d6fSBoris Brezillon {
108677d65d6fSBoris Brezillon 	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
108777d65d6fSBoris Brezillon 		return NULL;
108877d65d6fSBoris Brezillon 
108977d65d6fSBoris Brezillon 	return chan->device->device_prep_dma_memcpy(chan, dest, src,
109077d65d6fSBoris Brezillon 						    len, flags);
109177d65d6fSBoris Brezillon }
109277d65d6fSBoris Brezillon 
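/*
 * Illustrative sketch: a memory-to-memory copy offloaded to a DMA_MEMCPY
 * capable channel. "src_dma" and "dst_dma" are assumed to be DMA addresses
 * already mapped for the channel's device.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
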
dmaengine_is_metadata_mode_supported(struct dma_chan * chan,enum dma_desc_metadata_mode mode)10934db8fd32SPeter Ujfalusi static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
10944db8fd32SPeter Ujfalusi 		enum dma_desc_metadata_mode mode)
10954db8fd32SPeter Ujfalusi {
10964db8fd32SPeter Ujfalusi 	if (!chan)
10974db8fd32SPeter Ujfalusi 		return false;
10984db8fd32SPeter Ujfalusi 
10994db8fd32SPeter Ujfalusi 	return !!(chan->device->desc_metadata_modes & mode);
11004db8fd32SPeter Ujfalusi }
11014db8fd32SPeter Ujfalusi 
11024db8fd32SPeter Ujfalusi #ifdef CONFIG_DMA_ENGINE
11034db8fd32SPeter Ujfalusi int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
11044db8fd32SPeter Ujfalusi 				   void *data, size_t len);
11054db8fd32SPeter Ujfalusi void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
11064db8fd32SPeter Ujfalusi 				      size_t *payload_len, size_t *max_len);
11074db8fd32SPeter Ujfalusi int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
11084db8fd32SPeter Ujfalusi 				    size_t payload_len);
11094db8fd32SPeter Ujfalusi #else /* CONFIG_DMA_ENGINE */
dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor * desc,void * data,size_t len)11104db8fd32SPeter Ujfalusi static inline int dmaengine_desc_attach_metadata(
11114db8fd32SPeter Ujfalusi 		struct dma_async_tx_descriptor *desc, void *data, size_t len)
11124db8fd32SPeter Ujfalusi {
11134db8fd32SPeter Ujfalusi 	return -EINVAL;
11144db8fd32SPeter Ujfalusi }
dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor * desc,size_t * payload_len,size_t * max_len)11154db8fd32SPeter Ujfalusi static inline void *dmaengine_desc_get_metadata_ptr(
11164db8fd32SPeter Ujfalusi 		struct dma_async_tx_descriptor *desc, size_t *payload_len,
11174db8fd32SPeter Ujfalusi 		size_t *max_len)
11184db8fd32SPeter Ujfalusi {
11194db8fd32SPeter Ujfalusi 	return NULL;
11204db8fd32SPeter Ujfalusi }
dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor * desc,size_t payload_len)11214db8fd32SPeter Ujfalusi static inline int dmaengine_desc_set_metadata_len(
11224db8fd32SPeter Ujfalusi 		struct dma_async_tx_descriptor *desc, size_t payload_len)
11234db8fd32SPeter Ujfalusi {
11244db8fd32SPeter Ujfalusi 	return -EINVAL;
11254db8fd32SPeter Ujfalusi }
11264db8fd32SPeter Ujfalusi #endif /* CONFIG_DMA_ENGINE */
11274db8fd32SPeter Ujfalusi 
1128b36f09c3SLars-Peter Clausen /**
1129b36f09c3SLars-Peter Clausen  * dmaengine_terminate_all() - Terminate all active DMA transfers
1130b36f09c3SLars-Peter Clausen  * @chan: The channel for which to terminate the transfers
1131b36f09c3SLars-Peter Clausen  *
1132b36f09c3SLars-Peter Clausen  * This function is DEPRECATED; use either dmaengine_terminate_sync() or
1133b36f09c3SLars-Peter Clausen  * dmaengine_terminate_async() instead.
1134b36f09c3SLars-Peter Clausen  */
dmaengine_terminate_all(struct dma_chan * chan)11356e3ecaf0SSascha Hauer static inline int dmaengine_terminate_all(struct dma_chan *chan)
11366e3ecaf0SSascha Hauer {
11377fa0cf46SMaxime Ripard 	if (chan->device->device_terminate_all)
11387fa0cf46SMaxime Ripard 		return chan->device->device_terminate_all(chan);
11397fa0cf46SMaxime Ripard 
11402c44ad91SMaxime Ripard 	return -ENOSYS;
11416e3ecaf0SSascha Hauer }
11426e3ecaf0SSascha Hauer 
1143b36f09c3SLars-Peter Clausen /**
1144b36f09c3SLars-Peter Clausen  * dmaengine_terminate_async() - Terminate all active DMA transfers
1145b36f09c3SLars-Peter Clausen  * @chan: The channel for which to terminate the transfers
1146b36f09c3SLars-Peter Clausen  *
1147b36f09c3SLars-Peter Clausen  * Calling this function will terminate all active and pending descriptors
1148b36f09c3SLars-Peter Clausen  * that have previously been submitted to the channel. It is not guaranteed
1149b36f09c3SLars-Peter Clausen  * though that the transfer for the active descriptor has stopped when the
1150b36f09c3SLars-Peter Clausen  * function returns. Furthermore, it is possible that the complete callback of
1151b36f09c3SLars-Peter Clausen  * a submitted transfer is still running when this function returns.
1152b36f09c3SLars-Peter Clausen  *
1153b36f09c3SLars-Peter Clausen  * dmaengine_synchronize() needs to be called before it is safe to free
1154b36f09c3SLars-Peter Clausen  * any memory that is accessed by previously submitted descriptors or before
1155b36f09c3SLars-Peter Clausen  * freeing any resources accessed from within the completion callback of any
115620d60f63SMaciej Grochowski  * previously submitted descriptors.
1157b36f09c3SLars-Peter Clausen  *
1158b36f09c3SLars-Peter Clausen  * This function can be called from atomic context as well as from within a
1159b36f09c3SLars-Peter Clausen  * complete callback of a descriptor submitted on the same channel.
1160b36f09c3SLars-Peter Clausen  *
1161b36f09c3SLars-Peter Clausen  * If neither of the two conditions above applies, consider using
1162b36f09c3SLars-Peter Clausen  * dmaengine_terminate_sync() instead.
1163b36f09c3SLars-Peter Clausen  */
dmaengine_terminate_async(struct dma_chan * chan)1164b36f09c3SLars-Peter Clausen static inline int dmaengine_terminate_async(struct dma_chan *chan)
1165b36f09c3SLars-Peter Clausen {
1166b36f09c3SLars-Peter Clausen 	if (chan->device->device_terminate_all)
1167b36f09c3SLars-Peter Clausen 		return chan->device->device_terminate_all(chan);
1168b36f09c3SLars-Peter Clausen 
1169b36f09c3SLars-Peter Clausen 	return -EINVAL;
1170b36f09c3SLars-Peter Clausen }
1171b36f09c3SLars-Peter Clausen 
1172b36f09c3SLars-Peter Clausen /**
1173b36f09c3SLars-Peter Clausen  * dmaengine_synchronize() - Synchronize DMA channel termination
1174b36f09c3SLars-Peter Clausen  * @chan: The channel to synchronize
1175b36f09c3SLars-Peter Clausen  *
1176b36f09c3SLars-Peter Clausen  * Synchronizes the DMA channel termination to the current context. When this
1177b36f09c3SLars-Peter Clausen  * function returns it is guaranteed that all transfers for previously issued
117820d60f63SMaciej Grochowski  * descriptors have stopped and it is safe to free the memory associated
1179b36f09c3SLars-Peter Clausen  * with them. Furthermore it is guaranteed that all complete callback functions
1180b36f09c3SLars-Peter Clausen  * for a previously submitted descriptor have finished running and it is safe to
1181b36f09c3SLars-Peter Clausen  * free resources accessed from within the complete callbacks.
1182b36f09c3SLars-Peter Clausen  *
1183b36f09c3SLars-Peter Clausen  * The behavior of this function is undefined if dma_async_issue_pending() has
1184b36f09c3SLars-Peter Clausen  * been called between dmaengine_terminate_async() and this function.
1185b36f09c3SLars-Peter Clausen  *
1186b36f09c3SLars-Peter Clausen  * This function must only be called from non-atomic context and must not be
1187b36f09c3SLars-Peter Clausen  * called from within a complete callback of a descriptor submitted on the same
1188b36f09c3SLars-Peter Clausen  * channel.
1189b36f09c3SLars-Peter Clausen  */
dmaengine_synchronize(struct dma_chan * chan)1190b36f09c3SLars-Peter Clausen static inline void dmaengine_synchronize(struct dma_chan *chan)
1191b36f09c3SLars-Peter Clausen {
1192b1d6ab1aSLars-Peter Clausen 	might_sleep();
1193b1d6ab1aSLars-Peter Clausen 
1194b36f09c3SLars-Peter Clausen 	if (chan->device->device_synchronize)
1195b36f09c3SLars-Peter Clausen 		chan->device->device_synchronize(chan);
1196b36f09c3SLars-Peter Clausen }
1197b36f09c3SLars-Peter Clausen 
1198b36f09c3SLars-Peter Clausen /**
1199b36f09c3SLars-Peter Clausen  * dmaengine_terminate_sync() - Terminate all active DMA transfers
1200b36f09c3SLars-Peter Clausen  * @chan: The channel for which to terminate the transfers
1201b36f09c3SLars-Peter Clausen  *
1202b36f09c3SLars-Peter Clausen  * Calling this function will terminate all active and pending transfers
1203b36f09c3SLars-Peter Clausen  * that have previously been submitted to the channel. It is similar to
1204b36f09c3SLars-Peter Clausen  * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
1205b36f09c3SLars-Peter Clausen  * stopped and that all complete callbacks have finished running when the
1206b36f09c3SLars-Peter Clausen  * function returns.
1207b36f09c3SLars-Peter Clausen  *
1208b36f09c3SLars-Peter Clausen  * This function must only be called from non-atomic context and must not be
1209b36f09c3SLars-Peter Clausen  * called from within a complete callback of a descriptor submitted on the same
1210b36f09c3SLars-Peter Clausen  * channel.
1211b36f09c3SLars-Peter Clausen  */
dmaengine_terminate_sync(struct dma_chan * chan)1212b36f09c3SLars-Peter Clausen static inline int dmaengine_terminate_sync(struct dma_chan *chan)
1213b36f09c3SLars-Peter Clausen {
1214b36f09c3SLars-Peter Clausen 	int ret;
1215b36f09c3SLars-Peter Clausen 
1216b36f09c3SLars-Peter Clausen 	ret = dmaengine_terminate_async(chan);
1217b36f09c3SLars-Peter Clausen 	if (ret)
1218b36f09c3SLars-Peter Clausen 		return ret;
1219b36f09c3SLars-Peter Clausen 
1220b36f09c3SLars-Peter Clausen 	dmaengine_synchronize(chan);
1221b36f09c3SLars-Peter Clausen 
1222b36f09c3SLars-Peter Clausen 	return 0;
1223b36f09c3SLars-Peter Clausen }
1224b36f09c3SLars-Peter Clausen 
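/*
 * Illustrative sketch: tearing a channel down with the helpers above. From
 * atomic context only the async variant may be used, with synchronization
 * deferred; from process context dmaengine_terminate_sync() does both steps.
 *
 *	(from atomic context, e.g. an interrupt handler or under a spinlock:)
 *	dmaengine_terminate_async(chan);
 *
 *	(later, from process context, before freeing DMA buffers:)
 *	dmaengine_synchronize(chan);
 *
 *	(or, when already in process context, in one step:)
 *	dmaengine_terminate_sync(chan);
 */
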
dmaengine_pause(struct dma_chan * chan)12256e3ecaf0SSascha Hauer static inline int dmaengine_pause(struct dma_chan *chan)
12266e3ecaf0SSascha Hauer {
122723a3ea2fSMaxime Ripard 	if (chan->device->device_pause)
122823a3ea2fSMaxime Ripard 		return chan->device->device_pause(chan);
122923a3ea2fSMaxime Ripard 
12302c44ad91SMaxime Ripard 	return -ENOSYS;
12316e3ecaf0SSascha Hauer }
12326e3ecaf0SSascha Hauer 
dmaengine_resume(struct dma_chan * chan)12336e3ecaf0SSascha Hauer static inline int dmaengine_resume(struct dma_chan *chan)
12346e3ecaf0SSascha Hauer {
123523a3ea2fSMaxime Ripard 	if (chan->device->device_resume)
123623a3ea2fSMaxime Ripard 		return chan->device->device_resume(chan);
123723a3ea2fSMaxime Ripard 
12382c44ad91SMaxime Ripard 	return -ENOSYS;
12396e3ecaf0SSascha Hauer }
12406e3ecaf0SSascha Hauer 
dmaengine_tx_status(struct dma_chan * chan,dma_cookie_t cookie,struct dma_tx_state * state)12413052cc2cSLars-Peter Clausen static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
12423052cc2cSLars-Peter Clausen 	dma_cookie_t cookie, struct dma_tx_state *state)
12433052cc2cSLars-Peter Clausen {
12443052cc2cSLars-Peter Clausen 	return chan->device->device_tx_status(chan, cookie, state);
12453052cc2cSLars-Peter Clausen }
12463052cc2cSLars-Peter Clausen 
dmaengine_submit(struct dma_async_tx_descriptor * desc)124798d530feSRussell King - ARM Linux static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
12486e3ecaf0SSascha Hauer {
12496e3ecaf0SSascha Hauer 	return desc->tx_submit(desc);
12506e3ecaf0SSascha Hauer }
12516e3ecaf0SSascha Hauer 
dmaengine_check_align(enum dmaengine_alignment align,size_t off1,size_t off2,size_t len)125277a68e56SMaxime Ripard static inline bool dmaengine_check_align(enum dmaengine_alignment align,
125377a68e56SMaxime Ripard 					 size_t off1, size_t off2, size_t len)
125483544ae9SDan Williams {
125588ac039cSAndy Shevchenko 	return !(((1 << align) - 1) & (off1 | off2 | len));
125683544ae9SDan Williams }
125783544ae9SDan Williams 
is_dma_copy_aligned(struct dma_device * dev,size_t off1,size_t off2,size_t len)125883544ae9SDan Williams static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
125983544ae9SDan Williams 				       size_t off2, size_t len)
126083544ae9SDan Williams {
126183544ae9SDan Williams 	return dmaengine_check_align(dev->copy_align, off1, off2, len);
126283544ae9SDan Williams }
126383544ae9SDan Williams 
is_dma_xor_aligned(struct dma_device * dev,size_t off1,size_t off2,size_t len)126483544ae9SDan Williams static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
126583544ae9SDan Williams 				      size_t off2, size_t len)
126683544ae9SDan Williams {
126783544ae9SDan Williams 	return dmaengine_check_align(dev->xor_align, off1, off2, len);
126883544ae9SDan Williams }
126983544ae9SDan Williams 
is_dma_pq_aligned(struct dma_device * dev,size_t off1,size_t off2,size_t len)127083544ae9SDan Williams static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
127183544ae9SDan Williams 				     size_t off2, size_t len)
127283544ae9SDan Williams {
127383544ae9SDan Williams 	return dmaengine_check_align(dev->pq_align, off1, off2, len);
127483544ae9SDan Williams }
127583544ae9SDan Williams 
is_dma_fill_aligned(struct dma_device * dev,size_t off1,size_t off2,size_t len)12764983a501SMaxime Ripard static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
12774983a501SMaxime Ripard 				       size_t off2, size_t len)
12784983a501SMaxime Ripard {
12794983a501SMaxime Ripard 	return dmaengine_check_align(dev->fill_align, off1, off2, len);
12804983a501SMaxime Ripard }
12814983a501SMaxime Ripard 
1282b2f46fd8SDan Williams static inline void
dma_set_maxpq(struct dma_device * dma,int maxpq,int has_pq_continue)1283b2f46fd8SDan Williams dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
1284b2f46fd8SDan Williams {
1285b2f46fd8SDan Williams 	dma->max_pq = maxpq;
1286b2f46fd8SDan Williams 	if (has_pq_continue)
1287b2f46fd8SDan Williams 		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
1288b2f46fd8SDan Williams }
1289b2f46fd8SDan Williams 
dmaf_continue(enum dma_ctrl_flags flags)1290b2f46fd8SDan Williams static inline bool dmaf_continue(enum dma_ctrl_flags flags)
1291b2f46fd8SDan Williams {
1292b2f46fd8SDan Williams 	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
1293b2f46fd8SDan Williams }
1294b2f46fd8SDan Williams 
dmaf_p_disabled_continue(enum dma_ctrl_flags flags)1295b2f46fd8SDan Williams static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
1296b2f46fd8SDan Williams {
1297b2f46fd8SDan Williams 	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
1298b2f46fd8SDan Williams 
1299b2f46fd8SDan Williams 	return (flags & mask) == mask;
1300b2f46fd8SDan Williams }
1301b2f46fd8SDan Williams 
dma_dev_has_pq_continue(struct dma_device * dma)1302b2f46fd8SDan Williams static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
1303b2f46fd8SDan Williams {
1304b2f46fd8SDan Williams 	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
1305b2f46fd8SDan Williams }
1306b2f46fd8SDan Williams 
dma_dev_to_maxpq(struct dma_device * dma)1307d3f3cf85SMathieu Lacage static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
1308b2f46fd8SDan Williams {
1309b2f46fd8SDan Williams 	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
1310b2f46fd8SDan Williams }
1311b2f46fd8SDan Williams 
1312b2f46fd8SDan Williams /* dma_maxpq - reduce maxpq in the face of continued operations
1313b2f46fd8SDan Williams  * @dma - dma device with PQ capability
1314b2f46fd8SDan Williams  * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
1315b2f46fd8SDan Williams  *
1316b2f46fd8SDan Williams  * When an engine does not support native continuation we need 3 extra
1317b2f46fd8SDan Williams  * source slots to reuse P and Q with the following coefficients:
1318b2f46fd8SDan Williams  * 1/ {00} * P : remove P from Q', but use it as a source for P'
1319b2f46fd8SDan Williams  * 2/ {01} * Q : use Q to continue Q' calculation
1320b2f46fd8SDan Williams  * 3/ {00} * Q : subtract Q from P' to cancel (2)
1321b2f46fd8SDan Williams  *
1322b2f46fd8SDan Williams  * In the case where P is disabled we only need 1 extra source:
1323b2f46fd8SDan Williams  * 1/ {01} * Q : use Q to continue Q' calculation
1324b2f46fd8SDan Williams  */
dma_maxpq(struct dma_device * dma,enum dma_ctrl_flags flags)1325b2f46fd8SDan Williams static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
1326b2f46fd8SDan Williams {
1327b2f46fd8SDan Williams 	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
1328b2f46fd8SDan Williams 		return dma_dev_to_maxpq(dma);
13295f77dd85SAndy Shevchenko 	if (dmaf_p_disabled_continue(flags))
1330b2f46fd8SDan Williams 		return dma_dev_to_maxpq(dma) - 1;
13315f77dd85SAndy Shevchenko 	if (dmaf_continue(flags))
1332b2f46fd8SDan Williams 		return dma_dev_to_maxpq(dma) - 3;
1333b2f46fd8SDan Williams 	BUG();
1334b2f46fd8SDan Williams }
1335b2f46fd8SDan Williams 
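/*
 * Worked example for dma_maxpq(): assume a device without native PQ
 * continuation that reports max_pq = 8. A fresh operation may use all 8
 * sources; a continued operation with P disabled
 * (DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P) leaves 8 - 1 = 7 sources; a
 * continued operation with P enabled (DMA_PREP_CONTINUE) leaves 8 - 3 = 5,
 * since three slots are consumed reusing P and Q as sources.
 */
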
dmaengine_get_icg(bool inc,bool sgl,size_t icg,size_t dir_icg)133687d001efSMaxime Ripard static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
133787d001efSMaxime Ripard 				      size_t dir_icg)
133887d001efSMaxime Ripard {
133987d001efSMaxime Ripard 	if (inc) {
134087d001efSMaxime Ripard 		if (dir_icg)
134187d001efSMaxime Ripard 			return dir_icg;
13425f77dd85SAndy Shevchenko 		if (sgl)
134387d001efSMaxime Ripard 			return icg;
134487d001efSMaxime Ripard 	}
134587d001efSMaxime Ripard 
134687d001efSMaxime Ripard 	return 0;
134787d001efSMaxime Ripard }
134887d001efSMaxime Ripard 
dmaengine_get_dst_icg(struct dma_interleaved_template * xt,struct data_chunk * chunk)134987d001efSMaxime Ripard static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
135087d001efSMaxime Ripard 					   struct data_chunk *chunk)
135187d001efSMaxime Ripard {
135287d001efSMaxime Ripard 	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
135387d001efSMaxime Ripard 				 chunk->icg, chunk->dst_icg);
135487d001efSMaxime Ripard }
135587d001efSMaxime Ripard 
dmaengine_get_src_icg(struct dma_interleaved_template * xt,struct data_chunk * chunk)135687d001efSMaxime Ripard static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
135787d001efSMaxime Ripard 					   struct data_chunk *chunk)
135887d001efSMaxime Ripard {
135987d001efSMaxime Ripard 	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
136087d001efSMaxime Ripard 				 chunk->icg, chunk->src_icg);
136187d001efSMaxime Ripard }
136287d001efSMaxime Ripard 
1363c13c8260SChris Leech /* --- public DMA engine API --- */
1364c13c8260SChris Leech 
1365649274d9SDan Williams #ifdef CONFIG_DMA_ENGINE
1366209b84a8SDan Williams void dmaengine_get(void);
1367209b84a8SDan Williams void dmaengine_put(void);
1368649274d9SDan Williams #else
dmaengine_get(void)1369649274d9SDan Williams static inline void dmaengine_get(void)
1370649274d9SDan Williams {
1371649274d9SDan Williams }
dmaengine_put(void)1372649274d9SDan Williams static inline void dmaengine_put(void)
1373649274d9SDan Williams {
1374649274d9SDan Williams }
1375649274d9SDan Williams #endif
1376649274d9SDan Williams 
1377729b5d1bSDan Williams #ifdef CONFIG_ASYNC_TX_DMA
1378729b5d1bSDan Williams #define async_dmaengine_get()	dmaengine_get()
1379729b5d1bSDan Williams #define async_dmaengine_put()	dmaengine_put()
13805fc6d897SDan Williams #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1381138f4c35SDan Williams #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
1382138f4c35SDan Williams #else
1383729b5d1bSDan Williams #define async_dma_find_channel(type) dma_find_channel(type)
13845fc6d897SDan Williams #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
1385729b5d1bSDan Williams #else
async_dmaengine_get(void)1386729b5d1bSDan Williams static inline void async_dmaengine_get(void)
1387729b5d1bSDan Williams {
1388729b5d1bSDan Williams }
async_dmaengine_put(void)1389729b5d1bSDan Williams static inline void async_dmaengine_put(void)
1390729b5d1bSDan Williams {
1391729b5d1bSDan Williams }
1392729b5d1bSDan Williams static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)1393729b5d1bSDan Williams async_dma_find_channel(enum dma_transaction_type type)
1394729b5d1bSDan Williams {
1395729b5d1bSDan Williams 	return NULL;
1396729b5d1bSDan Williams }
1397138f4c35SDan Williams #endif /* CONFIG_ASYNC_TX_DMA */
13987405f74bSDan Williams void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
13997405f74bSDan Williams 				  struct dma_chan *chan);
1400c13c8260SChris Leech 
async_tx_ack(struct dma_async_tx_descriptor * tx)14010839875eSDan Williams static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
14027405f74bSDan Williams {
1403636bdeaaSDan Williams 	tx->flags |= DMA_CTRL_ACK;
1404636bdeaaSDan Williams }
1405636bdeaaSDan Williams 
async_tx_clear_ack(struct dma_async_tx_descriptor * tx)1406ef560682SGuennadi Liakhovetski static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
1407ef560682SGuennadi Liakhovetski {
1408ef560682SGuennadi Liakhovetski 	tx->flags &= ~DMA_CTRL_ACK;
1409ef560682SGuennadi Liakhovetski }
1410ef560682SGuennadi Liakhovetski 
async_tx_test_ack(struct dma_async_tx_descriptor * tx)14110839875eSDan Williams static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
1412636bdeaaSDan Williams {
14130839875eSDan Williams 	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
1414c13c8260SChris Leech }
1415c13c8260SChris Leech 
14167405f74bSDan Williams #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
14177405f74bSDan Williams static inline void
__dma_cap_set(enum dma_transaction_type tx_type,dma_cap_mask_t * dstp)14187405f74bSDan Williams __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
14197405f74bSDan Williams {
14207405f74bSDan Williams 	set_bit(tx_type, dstp->bits);
14217405f74bSDan Williams }
14227405f74bSDan Williams 
14230f571515SAtsushi Nemoto #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
14240f571515SAtsushi Nemoto static inline void
__dma_cap_clear(enum dma_transaction_type tx_type,dma_cap_mask_t * dstp)14250f571515SAtsushi Nemoto __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
14260f571515SAtsushi Nemoto {
14270f571515SAtsushi Nemoto 	clear_bit(tx_type, dstp->bits);
14280f571515SAtsushi Nemoto }
14290f571515SAtsushi Nemoto 
143033df8ca0SDan Williams #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
__dma_cap_zero(dma_cap_mask_t * dstp)143133df8ca0SDan Williams static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
143233df8ca0SDan Williams {
143333df8ca0SDan Williams 	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
143433df8ca0SDan Williams }
143533df8ca0SDan Williams 
14367405f74bSDan Williams #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
14377405f74bSDan Williams static inline int
__dma_has_cap(enum dma_transaction_type tx_type,dma_cap_mask_t * srcp)14387405f74bSDan Williams __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
14397405f74bSDan Williams {
14407405f74bSDan Williams 	return test_bit(tx_type, srcp->bits);
14417405f74bSDan Williams }
14427405f74bSDan Williams 
14437405f74bSDan Williams #define for_each_dma_cap_mask(cap, mask) \
1444e5a087fdSAkinobu Mita 	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
14457405f74bSDan Williams 
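/*
 * Illustrative sketch: building a capability mask with the helpers above and
 * requesting any channel that can do memcpy (dma_request_channel() is defined
 * further down in this header).
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 */
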
1446c13c8260SChris Leech /**
14477405f74bSDan Williams  * dma_async_issue_pending - flush pending transactions to HW
1448fe4ada2dSRandy Dunlap  * @chan: target DMA channel
1449c13c8260SChris Leech  *
1450c13c8260SChris Leech  * This allows drivers to push copies to HW in batches,
1451c13c8260SChris Leech  * reducing MMIO writes where possible.
1452c13c8260SChris Leech  */
dma_async_issue_pending(struct dma_chan * chan)14537405f74bSDan Williams static inline void dma_async_issue_pending(struct dma_chan *chan)
1454c13c8260SChris Leech {
1455ec8670f1SDan Williams 	chan->device->device_issue_pending(chan);
1456c13c8260SChris Leech }
1457c13c8260SChris Leech 
1458c13c8260SChris Leech /**
14597405f74bSDan Williams  * dma_async_is_tx_complete - poll for transaction completion
1460c13c8260SChris Leech  * @chan: DMA channel
1461c13c8260SChris Leech  * @cookie: transaction identifier to check status of
1462c13c8260SChris Leech  * @last: returns last completed cookie, can be NULL
1463c13c8260SChris Leech  * @used: returns last issued cookie, can be NULL
1464c13c8260SChris Leech  *
1465c13c8260SChris Leech  * If @last and @used are passed in, upon return they reflect the driver
1466c13c8260SChris Leech  * internal state and can be used with dma_async_is_complete() to check
1467c13c8260SChris Leech  * the status of multiple cookies without re-checking hardware state.
1468c13c8260SChris Leech  */
dma_async_is_tx_complete(struct dma_chan * chan,dma_cookie_t cookie,dma_cookie_t * last,dma_cookie_t * used)14697405f74bSDan Williams static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
1470c13c8260SChris Leech 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
1471c13c8260SChris Leech {
147207934481SLinus Walleij 	struct dma_tx_state state;
147307934481SLinus Walleij 	enum dma_status status;
147407934481SLinus Walleij 
147507934481SLinus Walleij 	status = chan->device->device_tx_status(chan, cookie, &state);
147607934481SLinus Walleij 	if (last)
147707934481SLinus Walleij 		*last = state.last;
147807934481SLinus Walleij 	if (used)
147907934481SLinus Walleij 		*used = state.used;
148007934481SLinus Walleij 	return status;
1481c13c8260SChris Leech }
1482c13c8260SChris Leech 
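/*
 * Illustrative sketch: polling one cookie via the hardware, then re-checking a
 * second cookie against the cached channel state with dma_async_is_complete()
 * to avoid another hardware read. "cookie1"/"cookie2" are hypothetical.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie1, &last, &used);
 *	if (status != DMA_COMPLETE)
 *		return -EINPROGRESS;
 *	status = dma_async_is_complete(cookie2, last, used);
 */
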
1483c13c8260SChris Leech /**
1484c13c8260SChris Leech  * dma_async_is_complete - test a cookie against chan state
1485c13c8260SChris Leech  * @cookie: transaction identifier to test status of
1486c13c8260SChris Leech  * @last_complete: last known completed transaction
1487c13c8260SChris Leech  * @last_used: last cookie value handed out
1488c13c8260SChris Leech  *
1489e239345fSBartlomiej Zolnierkiewicz  * dma_async_is_complete() is used in dma_async_is_tx_complete();
14908a5703f8SSebastian Siewior  * the test logic is separated to allow lightweight testing of multiple cookies.
1491c13c8260SChris Leech  */
dma_async_is_complete(dma_cookie_t cookie,dma_cookie_t last_complete,dma_cookie_t last_used)1492c13c8260SChris Leech static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
1493c13c8260SChris Leech 			dma_cookie_t last_complete, dma_cookie_t last_used)
1494c13c8260SChris Leech {
1495c13c8260SChris Leech 	if (last_complete <= last_used) {
1496c13c8260SChris Leech 		if ((cookie <= last_complete) || (cookie > last_used))
1497adfedd9aSVinod Koul 			return DMA_COMPLETE;
1498c13c8260SChris Leech 	} else {
1499c13c8260SChris Leech 		if ((cookie <= last_complete) && (cookie > last_used))
1500adfedd9aSVinod Koul 			return DMA_COMPLETE;
1501c13c8260SChris Leech 	}
1502c13c8260SChris Leech 	return DMA_IN_PROGRESS;
1503c13c8260SChris Leech }
1504c13c8260SChris Leech 
1505bca34692SDan Williams static inline void
dma_set_tx_state(struct dma_tx_state * st,dma_cookie_t last,dma_cookie_t used,u32 residue)1506bca34692SDan Williams dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
1507bca34692SDan Williams {
15083a92063bSAndy Shevchenko 	if (!st)
15093a92063bSAndy Shevchenko 		return;
15103a92063bSAndy Shevchenko 
1511bca34692SDan Williams 	st->last = last;
1512bca34692SDan Williams 	st->used = used;
1513bca34692SDan Williams 	st->residue = residue;
1514bca34692SDan Williams }
1515bca34692SDan Williams 
151607f2211eSDan Williams #ifdef CONFIG_DMA_ENGINE
15174a43f394SJon Mason struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
15184a43f394SJon Mason enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
151907f2211eSDan Williams enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
1520c50331e8SDan Williams void dma_issue_pending_all(void);
1521a53e28daSLars-Peter Clausen struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1522f5151311SBaolin Wang 				       dma_filter_fn fn, void *fn_param,
1523f5151311SBaolin Wang 				       struct device_node *np);
1524a8135d0dSPeter Ujfalusi 
1525a8135d0dSPeter Ujfalusi struct dma_chan *dma_request_chan(struct device *dev, const char *name);
1526a8135d0dSPeter Ujfalusi struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
1527a8135d0dSPeter Ujfalusi 
15288f33d527SGuennadi Liakhovetski void dma_release_channel(struct dma_chan *chan);
1529fdb8df99SLaurent Pinchart int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
153007f2211eSDan Williams #else
dma_find_channel(enum dma_transaction_type tx_type)15314a43f394SJon Mason static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
15324a43f394SJon Mason {
15334a43f394SJon Mason 	return NULL;
15344a43f394SJon Mason }
dma_sync_wait(struct dma_chan * chan,dma_cookie_t cookie)15354a43f394SJon Mason static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
15364a43f394SJon Mason {
1537adfedd9aSVinod Koul 	return DMA_COMPLETE;
15384a43f394SJon Mason }
dma_wait_for_async_tx(struct dma_async_tx_descriptor * tx)153907f2211eSDan Williams static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
154007f2211eSDan Williams {
1541adfedd9aSVinod Koul 	return DMA_COMPLETE;
154207f2211eSDan Williams }
dma_issue_pending_all(void)1543c50331e8SDan Williams static inline void dma_issue_pending_all(void)
1544c50331e8SDan Williams {
15458f33d527SGuennadi Liakhovetski }
__dma_request_channel(const dma_cap_mask_t * mask,dma_filter_fn fn,void * fn_param,struct device_node * np)1546a53e28daSLars-Peter Clausen static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1547f5151311SBaolin Wang 						     dma_filter_fn fn,
1548f5151311SBaolin Wang 						     void *fn_param,
1549f5151311SBaolin Wang 						     struct device_node *np)
15508f33d527SGuennadi Liakhovetski {
15518f33d527SGuennadi Liakhovetski 	return NULL;
15528f33d527SGuennadi Liakhovetski }
dma_request_chan(struct device * dev,const char * name)1553a8135d0dSPeter Ujfalusi static inline struct dma_chan *dma_request_chan(struct device *dev,
1554a8135d0dSPeter Ujfalusi 						const char *name)
1555a8135d0dSPeter Ujfalusi {
1556a8135d0dSPeter Ujfalusi 	return ERR_PTR(-ENODEV);
1557a8135d0dSPeter Ujfalusi }
dma_request_chan_by_mask(const dma_cap_mask_t * mask)1558a8135d0dSPeter Ujfalusi static inline struct dma_chan *dma_request_chan_by_mask(
1559a8135d0dSPeter Ujfalusi 						const dma_cap_mask_t *mask)
1560a8135d0dSPeter Ujfalusi {
1561a8135d0dSPeter Ujfalusi 	return ERR_PTR(-ENODEV);
1562a8135d0dSPeter Ujfalusi }
dma_release_channel(struct dma_chan * chan)15638f33d527SGuennadi Liakhovetski static inline void dma_release_channel(struct dma_chan *chan)
15648f33d527SGuennadi Liakhovetski {
1565c50331e8SDan Williams }
dma_get_slave_caps(struct dma_chan * chan,struct dma_slave_caps * caps)1566fdb8df99SLaurent Pinchart static inline int dma_get_slave_caps(struct dma_chan *chan,
1567fdb8df99SLaurent Pinchart 				     struct dma_slave_caps *caps)
1568fdb8df99SLaurent Pinchart {
1569fdb8df99SLaurent Pinchart 	return -ENXIO;
1570fdb8df99SLaurent Pinchart }
157107f2211eSDan Williams #endif
1572c13c8260SChris Leech 
dmaengine_desc_set_reuse(struct dma_async_tx_descriptor * tx)157327242021SVinod Koul static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
157427242021SVinod Koul {
157527242021SVinod Koul 	struct dma_slave_caps caps;
157653a256a9SLukas Wunner 	int ret;
157727242021SVinod Koul 
157853a256a9SLukas Wunner 	ret = dma_get_slave_caps(tx->chan, &caps);
157953a256a9SLukas Wunner 	if (ret)
158053a256a9SLukas Wunner 		return ret;
158127242021SVinod Koul 
15823a92063bSAndy Shevchenko 	if (!caps.descriptor_reuse)
15833a92063bSAndy Shevchenko 		return -EPERM;
15843a92063bSAndy Shevchenko 
158527242021SVinod Koul 	tx->flags |= DMA_CTRL_REUSE;
158627242021SVinod Koul 	return 0;
158727242021SVinod Koul }
158827242021SVinod Koul 
dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor * tx)158927242021SVinod Koul static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
159027242021SVinod Koul {
159127242021SVinod Koul 	tx->flags &= ~DMA_CTRL_REUSE;
159227242021SVinod Koul }
159327242021SVinod Koul 
dmaengine_desc_test_reuse(struct dma_async_tx_descriptor * tx)159427242021SVinod Koul static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
159527242021SVinod Koul {
159627242021SVinod Koul 	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
159727242021SVinod Koul }
159827242021SVinod Koul 
dmaengine_desc_free(struct dma_async_tx_descriptor * desc)159927242021SVinod Koul static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
160027242021SVinod Koul {
160127242021SVinod Koul 	/* this is supported for reusable desc, so check that */
16023a92063bSAndy Shevchenko 	if (!dmaengine_desc_test_reuse(desc))
160327242021SVinod Koul 		return -EPERM;
16043a92063bSAndy Shevchenko 
16053a92063bSAndy Shevchenko 	return desc->desc_free(desc);
160627242021SVinod Koul }
160727242021SVinod Koul 
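/*
 * Illustrative sketch: marking a descriptor reusable when the channel supports
 * it, so it can be resubmitted after each completion instead of being
 * re-prepared, and freeing it explicitly once retired. Names and flags are
 * hypothetical.
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
 *	if (!desc || dmaengine_desc_set_reuse(desc))
 *		return -EPERM;
 *	(the descriptor may now be submitted repeatedly via dmaengine_submit())
 *	dmaengine_desc_free(desc);
 */
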
1608c13c8260SChris Leech /* --- DMA device --- */
1609c13c8260SChris Leech 
1610c13c8260SChris Leech int dma_async_device_register(struct dma_device *device);
1611f39b948dSHuang Shijie int dmaenginem_async_device_register(struct dma_device *device);
1612c13c8260SChris Leech void dma_async_device_unregister(struct dma_device *device);
1613e81274cdSDave Jiang int dma_async_device_channel_register(struct dma_device *device,
161410b8e0fdSAmelie Delaunay 				      struct dma_chan *chan,
161510b8e0fdSAmelie Delaunay 				      const char *name);
1616e81274cdSDave Jiang void dma_async_device_channel_unregister(struct dma_device *device,
1617e81274cdSDave Jiang 					 struct dma_chan *chan);
161807f2211eSDan Williams void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
1619f5151311SBaolin Wang #define dma_request_channel(mask, x, y) \
1620f5151311SBaolin Wang 	__dma_request_channel(&(mask), x, y, NULL)
1621864ef69bSMatt Porter 
16227547dbd3SPeter Ujfalusi /* Deprecated, please use dma_request_chan() directly */
16237547dbd3SPeter Ujfalusi static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device * dev,const char * name)16247547dbd3SPeter Ujfalusi dma_request_slave_channel(struct device *dev, const char *name)
16257547dbd3SPeter Ujfalusi {
16267547dbd3SPeter Ujfalusi 	struct dma_chan *ch = dma_request_chan(dev, name);
16277547dbd3SPeter Ujfalusi 
16287547dbd3SPeter Ujfalusi 	return IS_ERR(ch) ? NULL : ch;
16297547dbd3SPeter Ujfalusi }
16307547dbd3SPeter Ujfalusi 
1631864ef69bSMatt Porter static inline struct dma_chan
dma_request_slave_channel_compat(const dma_cap_mask_t mask,dma_filter_fn fn,void * fn_param,struct device * dev,const char * name)163271ca5b78SGeert Uytterhoeven *dma_request_slave_channel_compat(const dma_cap_mask_t mask,
1633a53e28daSLars-Peter Clausen 				  dma_filter_fn fn, void *fn_param,
16341dc04288SJarkko Nikula 				  struct device *dev, const char *name)
1635864ef69bSMatt Porter {
1636864ef69bSMatt Porter 	struct dma_chan *chan;
1637864ef69bSMatt Porter 
163831d43141SAndy Shevchenko 	chan = dma_request_chan(dev, name);
163931d43141SAndy Shevchenko 	if (!IS_ERR(chan))
1640864ef69bSMatt Porter 		return chan;
1641864ef69bSMatt Porter 
16427dfffb95SGeert Uytterhoeven 	if (!fn || !fn_param)
16437dfffb95SGeert Uytterhoeven 		return NULL;
16447dfffb95SGeert Uytterhoeven 
1645*1c83d3dfSAndy Shevchenko 	return dma_request_channel(mask, fn, fn_param);
1646864ef69bSMatt Porter }
1647816ebf48SPeter Ujfalusi 
1648816ebf48SPeter Ujfalusi static inline char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)1649816ebf48SPeter Ujfalusi dmaengine_get_direction_text(enum dma_transfer_direction dir)
1650816ebf48SPeter Ujfalusi {
1651816ebf48SPeter Ujfalusi 	switch (dir) {
1652816ebf48SPeter Ujfalusi 	case DMA_DEV_TO_MEM:
1653816ebf48SPeter Ujfalusi 		return "DEV_TO_MEM";
1654816ebf48SPeter Ujfalusi 	case DMA_MEM_TO_DEV:
1655816ebf48SPeter Ujfalusi 		return "MEM_TO_DEV";
1656816ebf48SPeter Ujfalusi 	case DMA_MEM_TO_MEM:
1657816ebf48SPeter Ujfalusi 		return "MEM_TO_MEM";
1658816ebf48SPeter Ujfalusi 	case DMA_DEV_TO_DEV:
1659816ebf48SPeter Ujfalusi 		return "DEV_TO_DEV";
1660816ebf48SPeter Ujfalusi 	default:
1661816ebf48SPeter Ujfalusi 		return "invalid";
1662de5506e1SChris Leech 	}
16631873300aSAndy Shevchenko }
1664ab650ef6SPeter Ujfalusi 
dmaengine_get_dma_device(struct dma_chan * chan)1665ab650ef6SPeter Ujfalusi static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
1666ab650ef6SPeter Ujfalusi {
1667ab650ef6SPeter Ujfalusi 	if (chan->dev->chan_dma_dev)
1668ab650ef6SPeter Ujfalusi 		return &chan->dev->device;
1669ab650ef6SPeter Ujfalusi 
1670ab650ef6SPeter Ujfalusi 	return chan->device->dev;
1671ab650ef6SPeter Ujfalusi }
1672ab650ef6SPeter Ujfalusi 
1673c13c8260SChris Leech #endif /* DMAENGINE_H */
1674