/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

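/*
 * Submission-check sketch (illustrative only; "chan", "dest", "src" and
 * "len" are assumed to exist in the caller): a negative cookie means the
 * copy was not queued and the caller should fall back to the CPU.
 */
#if 0
	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (dma_submit_error(cookie))
		memcpy(dest, src, len);	/* engine busy, copy synchronously */
#endif
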
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK: the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has had a chance to establish any
 *	dependency chains
 * @DMA_COMPL_SKIP_SRC_UNMAP: set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP: set to disable dma-unmapping the destination(s)
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
};

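/*
 * Flags sketch (illustrative only; "chan", "dest", "src" and "len" are
 * assumed to exist in the caller): flags are OR-ed together and passed
 * to the device_prep_* methods declared in struct dma_device below.
 */
#if 0
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
#endif
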
/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * each 'suitable' channel is passed to this routine for further disposition
 * before being returned, where 'suitable' means a non-busy channel that
 * satisfies the given capability mask.  The routine returns 'true' to
 * indicate that the channel should be used.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

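/*
 * Filter sketch (illustrative only; "struct my_platform_data" and its
 * "dma_dev" member are hypothetical): accept only channels supplied by
 * the DMA controller the caller expects.
 */
#if 0
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	struct my_platform_data *pdata = filter_param;

	return chan->device->dev == pdata->dma_dev;
}
#endif
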
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

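/*
 * Descriptor lifecycle sketch (illustrative only; "my_done" and
 * "my_submit_copy" are hypothetical): prepare, attach a completion
 * callback, submit, then kick the engine with dma_async_issue_pending().
 */
#if 0
static void my_done(void *param)
{
	complete(param);	/* wake the submitter */
}

static dma_cookie_t my_submit_copy(struct dma_chan *chan, dma_addr_t dest,
				   dma_addr_t src, size_t len,
				   struct completion *cmp)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_done;
	tx->callback_param = cmp;
	return tx->tx_submit(tx);	/* check with dma_submit_error() */
}
#endif
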
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

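/*
 * Provider sketch (illustrative only; "struct my_dma" and the my_*
 * methods are hypothetical): a driver advertises what it supports in
 * cap_mask, fills in the matching methods, and registers the device.
 */
#if 0
static int my_dma_probe(struct my_dma *md)
{
	struct dma_device *dev = &md->dma_dev;

	dma_cap_zero(dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dev->cap_mask);

	dev->device_alloc_chan_resources = my_alloc_chan_resources;
	dev->device_free_chan_resources = my_free_chan_resources;
	dev->device_prep_dma_memcpy = my_prep_dma_memcpy;
	dev->device_is_tx_complete = my_is_tx_complete;
	dev->device_issue_pending = my_issue_pending;

	return dma_async_device_register(dev);
}
#endif
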
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

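/*
 * Capability-mask sketch (illustrative only): zero a mask, request two
 * capabilities, then walk the bits that were set.
 */
#if 0
	dma_cap_mask_t mask;
	enum dma_transaction_type cap;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_XOR, mask);

	for_each_dma_cap_mask(cap, mask)
		pr_debug("capability %d set\n", cap);
#endif
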
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used) \
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple
 * cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

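/*
 * Polling sketch (illustrative only; "cookie1" and "cookie2" are assumed
 * to be cookies returned by earlier submissions on "chan"): read the
 * hardware state once, then test further cookies against the cached
 * (last_complete, last_used) pair.
 */
#if 0
	dma_cookie_t last, used;

	if (dma_async_is_tx_complete(chan, cookie1, &last, &used) == DMA_SUCCESS &&
	    dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS)
		pr_debug("both copies done, one hardware query\n");
#endif
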
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, fn, fn_param) \
	__dma_request_channel(&(mask), fn, fn_param)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
	dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

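/*
 * Channel-request sketch (illustrative only, reusing the hypothetical
 * my_filter() and platform data from the dma_filter_fn example above):
 */
#if 0
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, my_filter, pdata);
	if (!chan)
		return -ENODEV;

	/* ... use the channel, then hand it back ... */
	dma_release_channel(chan);
#endif
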
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

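/*
 * Iovec-copy sketch (illustrative only; the caller is assumed to hold a
 * channel and a kernel buffer "kdata"): pin the user pages once, issue
 * the copy, wait for completion, then unpin.
 */
#if 0
	struct dma_pinned_list *pinned;
	dma_cookie_t cookie;

	pinned = dma_pin_iovec_pages(iov, len);
	if (!pinned)
		return -ENOMEM;

	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
	if (!dma_submit_error(cookie))
		dma_sync_wait(chan, cookie);

	dma_unpin_iovec_pages(pinned);
#endif
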
#endif /* DMAENGINE_H */