/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#ifndef __DPAA2_QDMA_H__
#define __DPAA2_QDMA_H__

struct qdma_sdd;
struct rte_qdma_job;

#define DPAA2_QDMA_MAX_FLE 3
#define DPAA2_QDMA_MAX_SDD 2

#define DPAA2_QDMA_MAX_SG_NB 64

#define DPAA2_DPDMAI_MAX_QUEUES	8

/** FLE single job pool element size: job pointer (uint64_t) +
 * 3 frame list entries + 2 source/destination descriptors.
 */
#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

/** FLE SG jobs pool element size: job count (uint64_t) +
 * 3 frame list entries + 2 source/destination descriptors +
 * 64 * 2 (source + destination) SG entries + 64 job pointers.
 */
#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)

#define QDMA_FLE_JOB_NB_OFFSET 0

#define QDMA_FLE_SINGLE_JOB_OFFSET 0

#define QDMA_FLE_FLE_OFFSET \
		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))

#define QDMA_FLE_SDD_OFFSET \
		(QDMA_FLE_FLE_OFFSET + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)

#define QDMA_FLE_SG_ENTRY_OFFSET \
		(QDMA_FLE_SDD_OFFSET + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

#define QDMA_FLE_SG_JOBS_OFFSET \
		(QDMA_FLE_SG_ENTRY_OFFSET + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
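
/*
 * Illustrative layout of one FLE pool element, derived from the
 * offsets above (a documentation sketch, not an additional driver
 * interface):
 *
 *   0                        : job pointer (single) or job count (SG)
 *   QDMA_FLE_FLE_OFFSET      : struct qbman_fle[DPAA2_QDMA_MAX_FLE]
 *   QDMA_FLE_SDD_OFFSET      : struct qdma_sdd[DPAA2_QDMA_MAX_SDD]
 *   QDMA_FLE_SG_ENTRY_OFFSET : struct qdma_sg_entry[DPAA2_QDMA_MAX_SG_NB * 2]
 *                              (SG pool elements only)
 *   QDMA_FLE_SG_JOBS_OFFSET  : struct rte_qdma_job *[DPAA2_QDMA_MAX_SG_NB]
 *                              (SG pool elements only)
 */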

/** FLE pool per-lcore cache size */
#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))
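/*
 * Illustrative arithmetic only (assuming the default RTE_MAX_LCORE of
 * 128): a pool of 4096 elements gives a per-lcore cache of
 * 4096 / (128 * 2) = 16 elements.
 */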

/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)
#define DPAA2_RBP_MEM_RW            0x0
/**
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7
/**
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE		0x6
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb

/** Maximum possible H/W queues on each core */
#define MAX_HW_QUEUE_PER_CORE		64

#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
/**
 * Represents a QDMA device.
 * A single QDMA device exists, which is a combination of multiple
 * DPDMAI rawdevs.
 */
struct qdma_device {
	/** Total number of HW queues. */
	uint16_t num_hw_queues;
	/**
	 * Maximum number of HW queues to be allocated per core.
	 * This is limited by MAX_HW_QUEUE_PER_CORE.
	 */
	uint16_t max_hw_queues_per_core;

	/** VQs of this device */
	struct qdma_virt_queue *vqs;
	/** Maximum number of VQs */
	uint16_t max_vqs;
	/** Device state - started or stopped */
	uint8_t state;
	/** FLE queue pool size */
	int fle_queue_pool_cnt;
	/** Lock to serialize access to the QDMA device whenever required */
	rte_spinlock_t lock;
};

/** Represents a QDMA H/W queue */
struct qdma_hw_queue {
	/** Pointer to next instance */
	TAILQ_ENTRY(qdma_hw_queue) next;
	/** DPDMAI device to communicate with HW */
	struct dpaa2_dpdmai_dev *dpdmai_dev;
	/** Queue ID to communicate with HW */
	uint16_t queue_id;
	/** Associated lcore id */
	uint32_t lcore_id;
	/** Number of users of this HW queue */
	uint32_t num_users;
};

struct qdma_virt_queue;

typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
					const struct qbman_fd *fd,
					struct rte_qdma_job **job,
					uint16_t *nb_jobs);
typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
					struct qbman_fd *fd,
					struct rte_qdma_job **job,
					uint16_t nb_jobs);

typedef int (qdma_dequeue_multijob_t)(
				struct qdma_virt_queue *qdma_vq,
				uint16_t *vq_id,
				struct rte_qdma_job **job,
				uint16_t nb_jobs);

typedef int (qdma_enqueue_multijob_t)(
			struct qdma_virt_queue *qdma_vq,
			struct rte_qdma_job **job,
			uint16_t nb_jobs);

/** Represents a QDMA virtual queue */
struct qdma_virt_queue {
	/** Status ring of the virtual queue */
	struct rte_ring *status_ring;
	/** Associated HW queue */
	struct qdma_hw_queue *hw_queue;
	/** FLE pool for the queue */
	struct rte_mempool *fle_pool;
	/** Route by port */
	struct rte_qdma_rbp rbp;
	/** Associated lcore id */
	uint32_t lcore_id;
	/** Indicates whether this VQ is in use */
	uint8_t in_use;
	/** Indicates whether this VQ has an exclusively associated HW queue */
	uint8_t exclusive_hw_queue;
	/* Total number of enqueues on this VQ */
	uint64_t num_enqueues;
	/* Total number of dequeues from this VQ */
	uint64_t num_dequeues;

	uint16_t vq_id;
	uint32_t flags;

	qdma_set_fd_t *set_fd;
	qdma_get_job_t *get_job;

	qdma_dequeue_multijob_t *dequeue_job;
	qdma_enqueue_multijob_t *enqueue_job;
};
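
/*
 * A minimal sketch of how the hooks above are used (hypothetical
 * helper, not part of the driver): the rawdev enqueue/dequeue paths
 * dispatch through the per-VQ function pointers so that single-job,
 * scatter-gather and route-by-port variants share one fast path.
 */
static inline int
qdma_vq_enqueue_sketch(struct qdma_virt_queue *qdma_vq,
		       struct rte_qdma_job **job, uint16_t nb_jobs)
{
	int ret;

	/* Format FDs and submit them via this VQ's enqueue hook. */
	ret = qdma_vq->enqueue_job(qdma_vq, job, nb_jobs);
	if (ret > 0)
		qdma_vq->num_enqueues += ret;
	return ret;
}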

/** Represents the per-core HW queue allocation in virtual mode */
struct qdma_per_core_info {
	/** List of allocated HW queues */
	struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
	/* Number of HW queues allocated for this core */
	uint16_t num_hw_queues;
};

/** Source/Destination Descriptor */
struct qdma_sdd {
	uint32_t rsv;
	/** Stride configuration */
	uint32_t stride;
	/** Route-by-port command */
	union {
		uint32_t rbpcmd;
		struct rbpcmd_st {
			uint32_t vfid:6;
			uint32_t rsv4:2;
			uint32_t pfid:1;
			uint32_t rsv3:7;
			uint32_t attr:3;
			uint32_t rsv2:1;
			uint32_t at:2;
			uint32_t vfa:1;
			uint32_t ca:1;
			uint32_t tc:3;
			uint32_t rsv1:5;
		} rbpcmd_simple;
	};
	union {
		uint32_t cmd;
		struct rcmd_simple {
			uint32_t portid:4;
			uint32_t rsv1:14;
			uint32_t rbp:1;
			uint32_t ssen:1;
			uint32_t rthrotl:4;
			uint32_t sqos:3;
			uint32_t ns:1;
			uint32_t rdtype:4;
		} read_cmd;
		struct wcmd_simple {
			uint32_t portid:4;
			uint32_t rsv3:10;
			uint32_t rsv2:2;
			uint32_t lwc:2;
			uint32_t rbp:1;
			uint32_t dsen:1;
			uint32_t rsv1:4;
			uint32_t dqos:3;
			uint32_t ns:1;
			uint32_t wrttype:4;
		} write_cmd;
	};
} __rte_packed;
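
/*
 * A minimal sketch (hypothetical helper, not driver code) of
 * programming a source/destination descriptor pair for a plain
 * memory-to-memory copy with RBP=0, using the transaction types
 * defined above; all other fields are left zeroed.
 */
static inline void
qdma_sdd_sketch_init(struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD])
{
	sdd[0].cmd = 0;
	sdd[1].cmd = 0;
	/* Source descriptor: coherent read of cacheable memory. */
	sdd[0].read_cmd.rdtype = DPAA2_COHERENT_NO_ALLOCATE_CACHE;
	/* Destination descriptor: coherent write of cacheable memory. */
	sdd[1].write_cmd.wrttype = DPAA2_COHERENT_ALLOCATE_CACHE;
}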

#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */
#define QDMA_SG_SL_SHORT	0x1 /* short length */
#define QDMA_SG_SL_LONG	0x0 /* long length */
#define QDMA_SG_F	0x1 /* last sg entry */
#define QDMA_SG_BMT_ENABLE 0x1
#define QDMA_SG_BMT_DISABLE 0x0

struct qdma_sg_entry {
	uint32_t addr_lo;		/* address 0:31 */
	uint32_t addr_hi:17;	/* address 32:48 */
	uint32_t rsv:15;
	union {
		uint32_t data_len_sl0;	/* SL=0, the long format */
		struct {
			uint32_t len:17;	/* SL=1, the short format */
			uint32_t reserve:3;
			uint32_t sf:1;
			uint32_t sr:1;
			uint32_t size:10;	/* buffer size */
		} data_len_sl1;
	} data_len;					/* AVAIL_LENGTH */
	union {
		uint32_t ctrl_fields;
		struct {
			uint32_t bpid:14;
			uint32_t ivp:1;
			uint32_t bmt:1;
			uint32_t offset:12;
			uint32_t fmt:2;
			uint32_t sl:1;
			uint32_t f:1;
		} ctrl;
	};
} __rte_packed;
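
/*
 * A minimal sketch (hypothetical helper, not driver code) of filling
 * one SG entry in the long-length (SL=0) format for a single data
 * buffer, splitting a 49-bit IOVA across addr_lo/addr_hi:
 */
static inline void
qdma_sg_entry_sketch_set(struct qdma_sg_entry *entry, uint64_t iova,
			 uint32_t len, int is_last)
{
	entry->addr_lo = (uint32_t)(iova & 0xffffffff);
	entry->addr_hi = (uint32_t)(iova >> 32) & 0x1ffff; /* bits 32:48 */
	entry->rsv = 0;
	entry->data_len.data_len_sl0 = len;
	entry->ctrl.bpid = 0;
	entry->ctrl.ivp = 0;
	entry->ctrl.bmt = QDMA_SG_BMT_DISABLE;
	entry->ctrl.offset = 0;
	entry->ctrl.fmt = QDMA_SG_FMT_SDB;	/* single data buffer */
	entry->ctrl.sl = QDMA_SG_SL_LONG;	/* long-length format */
	entry->ctrl.f = is_last ? QDMA_SG_F : 0;
}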

/** Represents a DPDMAI raw device */
struct dpaa2_dpdmai_dev {
	/** Pointer to next device instance */
	TAILQ_ENTRY(dpaa2_dpdmai_dev) next;
	/** Handle to DPDMAI object */
	struct fsl_mc_io dpdmai;
	/** HW ID for DPDMAI object */
	uint32_t dpdmai_id;
	/** Token of this device */
	uint16_t token;
	/** Number of queues in this DPDMAI device */
	uint8_t num_queues;
	/** RX queues */
	struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** TX queues */
	struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	struct qdma_device *qdma_dev;
};

static inline struct qdma_device *
QDMA_DEV_OF_VQ(struct qdma_virt_queue *vq)
{
	return vq->hw_queue->dpdmai_dev->qdma_dev;
}

#endif /* __DPAA2_QDMA_H__ */