/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

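/*
 * Example (hypothetical client code, not from this file): checking a cookie
 * returned from a submit with dma_submit_error().  A minimal sketch; "tx" is
 * an assumed, already-prepared descriptor.
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	if (dma_submit_error(cookie))
 *		pr_err("DMA submission failed\n");
 */
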
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 * 	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 * 	this transaction
 * @DMA_CTRL_ACK: the descriptor cannot be reused until the client
 * 	acknowledges receipt, i.e. has had a chance to establish any
 * 	dependency chains
 * @DMA_COMPL_SKIP_SRC_UNMAP: set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP: set to disable dma-unmapping the destination(s)
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device which supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned, where 'suitable' means a non-busy channel that
 * satisfies the given capability mask.  The routine returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

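/*
 * Example (hypothetical, not from this file): a filter that only accepts
 * channels from a specific device.  A minimal sketch; "my_dev" is an assumed
 * driver-private pointer passed through filter_param.
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct device *my_dev = filter_param;
 *
 *		return chan->device->dev == my_dev;
 *	}
 */
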
typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 * 	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

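/*
 * Example (hypothetical client code): the usual prepare/submit flow.  A
 * minimal sketch assuming "chan", "dst", "src" and "len" were set up by the
 * caller, the channel's device supports DMA_MEMCPY, and my_callback/my_data
 * are the caller's completion handler and its argument.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_callback;
 *	tx->callback_param = my_data;
 *	cookie = tx->tx_submit(tx);
 */
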
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t  cap_mask;
	int max_xor;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#define async_dma_find_channel(type) dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

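/*
 * Example (hypothetical, not from this file): building a capability mask and
 * walking the capabilities set in it.  A minimal sketch; "cap" is an
 * enum dma_transaction_type loop variable.
 *
 *	dma_cap_mask_t mask;
 *	enum dma_transaction_type cap;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	for_each_dma_cap_mask(cap, mask)
 *		pr_info("capability %d set\n", cap);
 */
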
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

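/*
 * Example (hypothetical client code): batching several submissions before a
 * single flush, as the comment above describes.  A minimal sketch;
 * submit_one_copy() is an assumed helper that prepares and submits one
 * descriptor on "chan".
 *
 *	for (i = 0; i < nr_copies; i++)
 *		submit_one_copy(chan, &reqs[i]);
 *	dma_async_issue_pending(chan);
 */
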
/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

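/*
 * Example (hypothetical client code): polling one cookie via the hardware,
 * then testing a second cookie against the cached @last/@used state without
 * touching the hardware again, as the comments above describe.  A minimal
 * sketch.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie1, &last, &used);
 *	if (status == DMA_SUCCESS)
 *		status = dma_async_is_complete(cookie2, last, used);
 */
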
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

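/*
 * Example (hypothetical client code): requesting and releasing a private
 * channel.  A minimal sketch; my_filter() is the assumed filter callback
 * sketched after dma_filter_fn above, and may be NULL to accept any channel
 * matching the mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (!chan)
 *		return -ENODEV;
 *	(... use the channel ...)
 *	dma_release_channel(chan);
 */
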
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

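/*
 * Example (hypothetical client code): the pin/copy/unpin flow for the iovec
 * helpers above.  A minimal sketch; error and partial-copy handling elided.
 *
 *	struct dma_pinned_list *pinned;
 *	dma_cookie_t cookie;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	if (!pinned)
 *		return -ENOMEM;
 *	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_unpin_iovec_pages(pinned);
 */
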
#endif /* DMAENGINE_H */