/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE	1
#define DMA_MAX_COOKIE	INT_MAX

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

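/*
 * Example (illustrative sketch, not part of this API): a client normally
 * obtains a cookie from tx_submit() on a prepared descriptor and tests it
 * before trusting it.  "tx" is assumed to come from one of the
 * device_prep_* hooks declared further down in this header.
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	if (dma_submit_error(cookie))
 *		return -EIO;	(cookie holds a negative error code)
 */
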
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_PAUSED,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ,
	DMA_XOR_VAL,
	DMA_PQ_VAL,
	DMA_MEMSET,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)


/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *  this transaction
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *  acknowledges receipt, i.e. has a chance to establish any dependency
 *  chains
 * @DMA_COMPL_SKIP_SRC_UNMAP: set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP: set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE: set to do the source dma-unmapping as single
 *	(if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE: set to do the destination dma-unmapping as
 *	single (if not set, do the destination dma-unmapping as page)
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers as
 *  sources that were the result of a previous operation; in the case of a PQ
 *  operation it continues the calculation with new sources
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend
 *  on the result of this operation
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
	DMA_PREP_PQ_DISABLE_P = (1 << 6),
	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
	DMA_PREP_CONTINUE = (1 << 8),
	DMA_PREP_FENCE = (1 << 9),
};

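/*
 * Example (sketch, assuming a channel "chan" and dma-mapped addresses
 * "dst"/"src"/"len" set up by the caller): request a completion interrupt
 * and mark the descriptor pre-acked so it may be recycled immediately.
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	if (!tx)
 *		return -ENOMEM;
 */
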
/**
 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
 * on a running channel.
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
 * that need to reconfigure slave channels at runtime (as opposed to passing
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 */
enum dma_ctrl_cmd {
	DMA_TERMINATE_ALL,
	DMA_PAUSE,
	DMA_RESUME,
	DMA_SLAVE_CONFIG,
};

/**
 * enum sum_check_bits - bit positions of the flags in enum sum_check_flags
 */
enum sum_check_bits {
	SUM_CHECK_P = 0,
	SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

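/*
 * Example (sketch): after a validation descriptor prepared with
 * device_prep_dma_xor_val() or device_prep_dma_pq_val() completes, the
 * caller-owned result word handed to the prep hook can be tested bit-wise.
 *
 *	enum sum_check_flags pq_result = 0;
 *
 *	(...prepare with &pq_result, submit, wait for completion...)
 *	if (pq_result & SUM_CHECK_P_RESULT)
 *		pr_err("P (xor) parity mismatch\n");
 *	if (pq_result & SUM_CHECK_Q_RESULT)
 *		pr_err("Q (reed-solomon) syndrome mismatch\n");
 */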

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu __percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data shall go in or out on this slave
 * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
 * legal values, DMA_BIDIRECTIONAL is not acceptable since we
 * need to differentiate source and target addresses.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
 * @dst_addr: this is the physical address where DMA slave data
 * should be written (TX), if the destination is memory this argument
 * is ignored.
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
 * Legal values: 1, 2, 4, 8.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in
 * units of the src_addr_width member, not bytes) that can be sent
 * in one burst to the device. Typically something like half the
 * FIFO depth on I/O peripherals so you don't overflow it. This
 * may or may not be applicable on memory sources.
 * @dst_maxburst: same as src_maxburst but for destination target
 * mutatis mutandis.
 *
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
 * command in the channel config interface, DMA_SLAVE_CONFIG,
 * and this struct will then be passed in as an argument to the
 * DMA engine device_control() function.
 *
 * The rationale for adding configuration information to this struct
 * is as follows: if it is likely that most DMA slave controllers in
 * the world will support the configuration option, then make it
 * generic. If not: if it is fixed so that it can be statically set
 * from the platform data, then prefer to do that. Else, if it is
 * neither fixed at runtime, nor generic enough (such as bus mastership
 * on some CPU family and whatnot) then create a custom slave config
 * struct and pass that, then make this config a member of that
 * struct, if applicable.
 */
struct dma_slave_config {
	enum dma_data_direction direction;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	u32 src_maxburst;
	u32 dst_maxburst;
};

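/*
 * Example (sketch, names are placeholders): configuring a channel for
 * memory-to-peripheral transfers to a 4-byte wide FIFO at "fifo_addr",
 * then handing the config to the engine through device_control().
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,	(e.g. half the FIFO depth)
 *	};
 *	int ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *					       (unsigned long)&cfg);
 */
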
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel a
 * suitable channel is passed to this routine for further dispositioning before
 * being returned; 'suitable' indicates a non-busy channel that satisfies the
 * given capability mask.  The routine returns 'true' to indicate that the
 * channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

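/*
 * Example (sketch): a filter that accepts only channels belonging to a
 * specific controller known to the caller; "my_dmac_dev" is a placeholder.
 * Such a filter is passed as the second argument of dma_request_channel()
 * (declared further down in this header).
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		struct device *my_dmac_dev = param;
 *
 *		return chan->device->dev == my_dmac_dev;
 *	}
 */
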
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
#endif
};

#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
	txd->next = next;
	next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
	txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
	return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of
 * a transfer.
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit
 *	on the selected transfer for states DMA_IN_PROGRESS and
 *	DMA_PAUSED if this is implemented in the driver, else 0
 */
struct dma_tx_state {
	dma_cookie_t last;
	dma_cookie_t used;
	u32 residue;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
 * @copy_align: alignment shift for memcpy operations
 * @xor_align: alignment shift for xor operations
 * @pq_align: alignment shift for pq operations
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_xor_val: prepares a xor validation operation
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pq zero-sum validation operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_control: manipulate all pending operations on a channel, returns
 *	zero or error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
 *	will just return a simple status code
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	unsigned short max_xor;
	unsigned short max_pq;
	u8 copy_align;
	u8 xor_align;
	u8 pq_align;
	u8 fill_align;
	#define DMA_HAS_PQ_CONTINUE (1 << 15)

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, enum sum_check_flags *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		enum sum_check_flags *pqres, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

	enum dma_status (*device_tx_status)(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate);
	void (*device_issue_pending)(struct dma_chan *chan);
};

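/*
 * Example (abbreviated driver-side sketch): a controller driver fills in
 * its capabilities and operation hooks before registering.  Only a
 * memcpy-capable engine is shown; the "my_*" hook implementations are
 * placeholders and are elided.
 *
 *	dma_cap_zero(dd->cap_mask);
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->copy_align = 2;	(require 4-byte alignment)
 *	dd->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dd->device_free_chan_resources = my_free_chan_resources;
 *	dd->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	err = dma_async_device_register(dd);
 */
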
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
				      size_t off2, size_t len)
{
	return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
				     size_t off2, size_t len)
{
	return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

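/*
 * Example (sketch): a client decides whether an engine can take an
 * operation, e.g. offloading a copy only when the offsets and length meet
 * the engine's alignment requirement, falling back to the CPU otherwise.
 *
 *	if (chan && is_dma_copy_aligned(chan->device, src_off, dst_off, len))
 *		(...use device_prep_dma_memcpy()...)
 *	else
 *		(...fall back to a CPU memcpy...)
 */
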
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
	dma->max_pq = maxpq;
	if (has_pq_continue)
		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
		return dma_dev_to_maxpq(dma);
	else if (dmaf_p_disabled_continue(flags))
		return dma_dev_to_maxpq(dma) - 1;
	else if (dmaf_continue(flags))
		return dma_dev_to_maxpq(dma) - 3;
	BUG();
}

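/*
 * Worked example: an engine registered with dma_set_maxpq(dma, 8, 0)
 * (no native continuation) reports dma_maxpq() == 8 for a fresh operation,
 * 8 - 3 = 5 when DMA_PREP_CONTINUE is set, and 8 - 1 = 7 when both
 * DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set.
 */
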
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

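/*
 * Example (sketch): building a capability mask and walking the
 * capabilities it contains.
 *
 *	dma_cap_mask_t mask;
 *	enum dma_transaction_type cap;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	for_each_dma_cap_mask(cap, mask)
 *		pr_debug("requested cap %d\n", cap);
 */
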
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

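/*
 * Example (sketch): synchronous wait on a previously submitted cookie.
 * Real users should prefer the completion callback; busy-waiting is shown
 * only to illustrate the calls (see also dma_sync_wait() below).
 *
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */
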
/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

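/*
 * Worked example: with last_complete == 100 and last_used == 105, cookie
 * 99 reports DMA_SUCCESS while cookie 103 reports DMA_IN_PROGRESS.  The
 * second branch handles cookie-counter wraparound, where last_used is
 * numerically below last_complete.
 */
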
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

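/*
 * Example (sketch): typical private-channel lifecycle, reusing the filter
 * from the dma_filter_fn example above; "my_filter" and "my_dmac_dev" are
 * placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dmac_dev);
 *	if (!chan)
 *		return -ENODEV;
 *	(...use the channel...)
 *	dma_release_channel(chan);
 */
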
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */