/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <[email protected]>
 */

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/buffer_impl.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
struct dma_buf_attachment;
struct dma_fence;
struct sg_table;

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
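
/*
 * Typical block lifecycle (an illustrative sketch based on the core
 * implementation in industrialio-buffer-dma.c, not a contract):
 *
 *	QUEUED --(submitted to the DMA controller)--> ACTIVE
 *	ACTIVE --(transfer done, iio_dma_buffer_block_done())--> DONE
 *	DONE   --(consumed and re-enqueued)--> QUEUED
 *
 * A block may additionally be marked DEAD when the queue is torn down
 * while the block is still in flight; it is then freed once the last
 * reference to it is dropped.
 */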

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 * @cyclic: True if this is a cyclic buffer
 * @fileio: True if this buffer is used for fileio mode
 * @sg_table: DMA table for the transfer when transferring a DMABUF
 * @fence: DMA fence to be signaled when a DMABUF transfer is complete
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed
	 * read-only by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;

	bool cyclic;
	bool fileio;

	struct sg_table *sg_table;
	struct dma_fence *fence;
};

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 * @next_dequeue: Index of the next block that will be dequeued
 * @enabled: Whether the buffer is operating in fileio mode
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;

	unsigned int next_dequeue;
	bool enabled;
};

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, @active, and the fields in the
 *   @fileio substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context, as well as the blocks on those lists. This covers the
 *   outgoing queue list and typically also a list of active blocks in the
 *   part that handles the DMA controller
 * @incoming: List of buffers on the incoming queue
 * @active: Whether the buffer is currently active
 * @num_dmabufs: Total number of DMABUFs attached to this queue
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	struct mutex lock;
	spinlock_t list_lock;
	struct list_head incoming;

	bool active;
	atomic_t num_dmabufs;

	struct iio_dma_buffer_queue_fileio fileio;
};
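
/*
 * Drivers typically embed the queue in their own buffer structure and
 * recover it with container_of(). An illustrative sketch (the names
 * "my_dma_buffer" and "to_my_dma_buffer" are hypothetical, not part of
 * this API):
 *
 *	struct my_dma_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *	};
 *
 *	static struct my_dma_buffer *to_my_dma_buffer(struct iio_buffer *buf)
 *	{
 *		struct iio_dma_buffer_queue *queue =
 *			container_of(buf, struct iio_dma_buffer_queue, buffer);
 *
 *		return container_of(queue, struct my_dma_buffer, queue);
 *	}
 */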

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called to submit a block to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
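
/*
 * Illustrative sketch of a dmaengine-backed @submit callback. The "chan"
 * field and my_buffer_dma_done() (a hypothetical completion handler that
 * calls iio_dma_buffer_block_done() on the block) are assumptions, not
 * part of this header. The key contract is that the driver starts a
 * transfer for the block and signals completion from its DMA callback:
 *
 *	static int my_buffer_submit(struct iio_dma_buffer_queue *queue,
 *				    struct iio_dma_buffer_block *block)
 *	{
 *		struct my_dma_buffer *my_buf = to_my_dma_buffer(&queue->buffer);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(my_buf->chan,
 *						   block->phys_addr, block->size,
 *						   DMA_DEV_TO_MEM,
 *						   DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = my_buffer_dma_done;
 *		desc->callback_param = block;
 *
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(my_buf->chan);
 *
 *		return 0;
 *	}
 */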

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
			 const char __user *user_buffer);
size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
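
/*
 * These helpers are meant to back a driver's struct iio_buffer_access_funcs.
 * An illustrative wiring (a sketch; a real driver adds its own ->release()
 * and may wrap individual callbacks):
 *
 *	static const struct iio_buffer_access_funcs my_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */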

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
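
/*
 * Illustrative setup order (a sketch, reusing the hypothetical
 * "my_dma_buffer" wrapper from above): iio_dma_buffer_init() is called
 * once when the wrapper is allocated, iio_dma_buffer_exit() when the
 * buffer is torn down, and iio_dma_buffer_release() from the buffer's
 * ->release() callback once the last reference is gone:
 *
 *	my_buf = kzalloc(sizeof(*my_buf), GFP_KERNEL);
 *	if (!my_buf)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = iio_dma_buffer_init(&my_buf->queue, dma_dev,
 *				  &my_buffer_dma_ops);
 *	if (ret) {
 *		kfree(my_buf);
 *		return ERR_PTR(ret);
 *	}
 */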

struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
			     struct dma_buf_attachment *attach);
void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block);
int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block,
				  struct dma_fence *fence,
				  struct sg_table *sgt,
				  size_t size, bool cyclic);
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
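
/*
 * The DMABUF helpers above map onto the corresponding callbacks in
 * struct iio_buffer_access_funcs. An illustrative wiring (a sketch,
 * extending the hypothetical my_buffer_access_funcs from above):
 *
 *	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
 *	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
 *	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
 *	.lock_queue = iio_dma_buffer_lock_queue,
 *	.unlock_queue = iio_dma_buffer_unlock_queue,
 */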

#endif