/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-direction.h>

struct scatterlist;

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_SG,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_CYCLIC,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)


/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has had a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 * 	(if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
 * 	(if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation, in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};

/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
 * that need to runtime reconfigure the slave channels (as opposed to passing
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
	FSLDMA_EXTERNAL_START,
};

/**
 * enum sum_check_bits - bit positions of the P and Q check results
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};


/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};


/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
 * legal values, DMA_BIDIRECTIONAL is not acceptable since we
 * need to differentiate source and target addresses.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX); if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX); if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * command in the channel config interface, DMA_SLAVE_CONFIG,
 * and this struct will then be passed in as an argument to the
 * DMA engine device_control() function.
 *
 * The rationale for adding configuration information to this struct
 * is as follows: if it is likely that most DMA slave controllers in
 * the world will support the configuration option, then make it
 * generic. If not: if it is fixed so that it can be sent in statically
 * from the platform data, then prefer to do that. Else, if it is neither
 * fixed at runtime, nor generic enough (such as bus mastership on
 * some CPU family and whatnot) then create a custom slave config
 * struct and pass that, then make this config a member of that
 * struct, if applicable.
 */
struct dma_slave_config {
	enum dma_data_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
};

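/*
 * Example (an illustrative sketch, not part of this header): a client
 * driver configuring a channel for memory-to-device transfers before
 * preparing slave descriptors, using dmaengine_slave_config() defined
 * further down.  The FIFO address "dev_fifo_phys" and the burst size
 * are hypothetical values for an imaginary peripheral:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr	= dev_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * A maxburst of 8 words suits, say, a 16-word FIFO; a nonzero return
 * means the channel could not accept the configuration.
 */
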
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel a
 * suitable channel is passed to this routine for further dispositioning before
 * being returned, where 'suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  The filter returns 'true' to indicate
 * that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

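/*
 * Example (sketch): a minimal filter callback that matches a channel by
 * its underlying struct device, used with dma_request_channel() and the
 * capability helpers defined further down in this header.  "my_dma_dev"
 * is a hypothetical pointer known to the caller:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dma_dev);
 */
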
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 * 	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_dma_sg: prepares a memory-to-memory scatterlist copy operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

507c13c8260SChris Leech 
5086e3ecaf0SSascha Hauer static inline int dmaengine_device_control(struct dma_chan *chan,
5096e3ecaf0SSascha Hauer 					   enum dma_ctrl_cmd cmd,
5106e3ecaf0SSascha Hauer 					   unsigned long arg)
5116e3ecaf0SSascha Hauer {
5126e3ecaf0SSascha Hauer 	return chan->device->device_control(chan, cmd, arg);
5136e3ecaf0SSascha Hauer }
5146e3ecaf0SSascha Hauer 
5156e3ecaf0SSascha Hauer static inline int dmaengine_slave_config(struct dma_chan *chan,
5166e3ecaf0SSascha Hauer 					  struct dma_slave_config *config)
5176e3ecaf0SSascha Hauer {
5186e3ecaf0SSascha Hauer 	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
5196e3ecaf0SSascha Hauer 			(unsigned long)config);
5206e3ecaf0SSascha Hauer }
5216e3ecaf0SSascha Hauer 
5226e3ecaf0SSascha Hauer static inline int dmaengine_terminate_all(struct dma_chan *chan)
5236e3ecaf0SSascha Hauer {
5246e3ecaf0SSascha Hauer 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
5256e3ecaf0SSascha Hauer }
5266e3ecaf0SSascha Hauer 
5276e3ecaf0SSascha Hauer static inline int dmaengine_pause(struct dma_chan *chan)
5286e3ecaf0SSascha Hauer {
5296e3ecaf0SSascha Hauer 	return dmaengine_device_control(chan, DMA_PAUSE, 0);
5306e3ecaf0SSascha Hauer }
5316e3ecaf0SSascha Hauer 
5326e3ecaf0SSascha Hauer static inline int dmaengine_resume(struct dma_chan *chan)
5336e3ecaf0SSascha Hauer {
5346e3ecaf0SSascha Hauer 	return dmaengine_device_control(chan, DMA_RESUME, 0);
5356e3ecaf0SSascha Hauer }
5366e3ecaf0SSascha Hauer 
53798d530feSRussell King - ARM Linux static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
5386e3ecaf0SSascha Hauer {
5396e3ecaf0SSascha Hauer 	return desc->tx_submit(desc);
5406e3ecaf0SSascha Hauer }
5416e3ecaf0SSascha Hauer 
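/*
 * Example (sketch): the usual slave submission sequence on a channel
 * already set up with dmaengine_slave_config().  "sgl"/"sg_len"
 * describe an already dma-mapped scatterlist; "my_callback" and
 * "my_data" are hypothetical client symbols:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *					DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_callback;
 *	desc->callback_param = my_data;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 *
 * Nothing reaches the hardware until dma_async_issue_pending() (defined
 * further down) is called.
 */
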
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

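/*
 * Worked example: a device advertising copy_align = 2 requires 4-byte
 * alignment; dmaengine_check_align() builds mask = (1 << 2) - 1 = 0x3
 * and rejects any offset or length with either low bit set:
 *
 *	is_dma_copy_aligned(dev, 0, 8, 64)	-> true
 *	is_dma_copy_aligned(dev, 2, 8, 64)	-> false (off1 & 0x3 != 0)
 */
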
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

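/*
 * Worked example for dma_maxpq(): a device registered with
 * dma_set_maxpq(dma, 8, 0) advertises 8 PQ sources and no native
 * continuation support.  A fresh operation may then use all 8 sources;
 * a DMA_PREP_CONTINUE operation only 8 - 3 = 5, since three slots are
 * burned re-feeding P and Q; and a DMA_PREP_CONTINUE |
 * DMA_PREP_PQ_DISABLE_P operation 8 - 1 = 7.
 */
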
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

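/*
 * Example (sketch): offloading a buffer-to-buffer copy and waiting for
 * it synchronously.  "chan" is assumed to be a DMA_MEMCPY-capable
 * channel; dma_async_issue_pending() and dma_sync_wait() are declared
 * further down in this header:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (dma_submit_error(cookie))
 *		return -ENOMEM;
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */
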
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

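/*
 * Example (sketch): iterating over every capability advertised by a
 * registered device, where "cap" is an enum dma_transaction_type and
 * "device" points at a struct dma_device:
 *
 *	enum dma_transaction_type cap;
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		printk("transaction type %d supported\n", cap);
 */
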
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

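/*
 * Example (sketch): snapshotting channel state once, then testing
 * several cookies against that snapshot without touching the hardware
 * again:
 *
 *	dma_cookie_t last, used;
 *	bool done;
 *
 *	dma_async_is_tx_complete(chan, cookie1, &last, &used);
 *	done = dma_async_is_complete(cookie1, last, used) == DMA_SUCCESS &&
 *	       dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS;
 */
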
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

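/*
 * Example (sketch): how a driver's device_tx_status() hook typically
 * uses dma_set_tx_state().  "struct my_chan", my_last_completed() and
 * my_bytes_left() are hypothetical driver internals:
 *
 *	static enum dma_status my_tx_status(struct dma_chan *chan,
 *			dma_cookie_t cookie, struct dma_tx_state *txstate)
 *	{
 *		struct my_chan *c = container_of(chan, struct my_chan, chan);
 *
 *		dma_set_tx_state(txstate, my_last_completed(c),
 *				 chan->cookie, my_bytes_left(c));
 *		return dma_async_is_complete(cookie, my_last_completed(c),
 *					     chan->cookie);
 *	}
 */
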
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
					      dma_filter_fn fn, void *fn_param)
{
	return NULL;
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)

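/*
 * Example (sketch): skeleton of provider-side registration for a
 * hypothetical memcpy-only engine.  "my_dev" embeds a struct dma_device
 * as "ddev"; channel list setup and the remaining mandatory hooks
 * (alloc/free channel resources, etc.) are elided:
 *
 *	dma_cap_zero(my_dev->ddev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_dev->ddev.cap_mask);
 *	my_dev->ddev.device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dev->ddev.device_issue_pending = my_issue_pending;
 *	my_dev->ddev.device_tx_status = my_tx_status;
 *	my_dev->ddev.dev = &pdev->dev;
 *	err = dma_async_device_register(&my_dev->ddev);
 */
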
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */