/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>

#include "otx2_evdev_stats.h"
#include "otx2_evdev.h"
#include "otx2_irq.h"
#include "otx2_tim_evdev.h"

static inline int
sso_get_msix_offsets(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
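	/* In dual workslot mode each event port is backed by two GWS LFs,
	 * hence twice as many SSOW MSIX offsets need to be fetched.
	 */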
	uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int i, rc;

	/* Get SSO and SSOW MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);
	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc < 0)
		return rc;

	for (i = 0; i < nb_ports; i++)
		dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];

	for (i = 0; i < dev->nb_event_queues; i++)
		dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];

	return rc;
}

void
sso_fastpath_fns_set(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
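	/* Each table below is expanded from SSO_RX_ADPTR_ENQ_FASTPATH_FUNC:
	 * every Rx offload flag contributes one [2] dimension, so the
	 * handler specialized for a given offload combination is selected
	 * by indexing with !!(rx_offloads & FLAG) per dimension, in the
	 * order [SEC][TSTAMP][MARK_UPDATE][VLAN_STRIP][CHECKSUM][PTYPE][RSS].
	 */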
	/* Single WS modes */
	const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	/* Dual WS modes */
	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_seg_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_t
		ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_seg_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	const event_dequeue_burst_t
		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue
		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_tx_adptr_enq_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	const event_tx_adapter_enqueue
		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
		[f6][f5][f4][f3][f2][f1][f0] =				\
			otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	event_dev->enqueue			= otx2_ssogws_enq;
	event_dev->enqueue_burst		= otx2_ssogws_enq_burst;
	event_dev->enqueue_new_burst		= otx2_ssogws_enq_new_burst;
	event_dev->enqueue_forward_burst	= otx2_ssogws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		event_dev->dequeue		= ssogws_deq_seg
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst	= ssogws_deq_seg_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		if (dev->is_timeout_deq) {
			event_dev->dequeue	= ssogws_deq_seg_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_seg_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	} else {
		event_dev->dequeue			= ssogws_deq
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		event_dev->dequeue_burst		= ssogws_deq_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		if (dev->is_timeout_deq) {
			event_dev->dequeue		= ssogws_deq_timeout
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	=
				ssogws_deq_timeout_burst
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
		}
	}

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [SEC] [TSO] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	} else {
		event_dev->txa_enqueue = ssogws_tx_adptr_enq
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
	}

	if (dev->dual_ws) {
		event_dev->enqueue		= otx2_ssogws_dual_enq;
		event_dev->enqueue_burst	= otx2_ssogws_dual_enq_burst;
		event_dev->enqueue_new_burst	=
					otx2_ssogws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
					otx2_ssogws_dual_enq_fwd_burst;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			event_dev->dequeue	= ssogws_dual_deq_seg
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_seg_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_seg_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		} else {
			event_dev->dequeue		= ssogws_dual_deq
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			event_dev->dequeue_burst	= ssogws_dual_deq_burst
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
				[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
			if (dev->is_timeout_deq) {
				event_dev->dequeue	=
					ssogws_dual_deq_timeout
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
				event_dev->dequeue_burst =
					ssogws_dual_deq_timeout_burst
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_SECURITY_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_TSTAMP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
					[!!(dev->rx_offloads &
						NIX_RX_OFFLOAD_CHECKSUM_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_PTYPE_F)]
					[!!(dev->rx_offloads &
							NIX_RX_OFFLOAD_RSS_F)];
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
		/* [SEC] [TSO] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_SECURITY_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		} else {
			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_SECURITY_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
				[!!(dev->tx_offloads &
						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
		}
	}

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
	rte_mb();
}

static void
otx2_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

static void
sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
{
	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
	uint64_t val;

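	/* Compose the GRPMSK_CHG request: group id in the low bits, mask
	 * set 0 selected via bit 12, membership enable in bit 14, and the
	 * top bits of each mask word set so the remaining masks are left
	 * unmodified (see the per-field comments below).
	 */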
	val = queue;
	val |= 0ULL << 12; /* SET 0 */
	val |= 0x8000800080000000; /* Don't modify the rest of the masks */
	val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */

	otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
}

static int
otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t port_id = 0;
	uint16_t link;

	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = port;

			port_id = ws->port;
			sso_port_link_modify((struct otx2_ssogws *)
					&ws->ws_state[0], queues[link], true);
			sso_port_link_modify((struct otx2_ssogws *)
					&ws->ws_state[1], queues[link], true);
		} else {
			struct otx2_ssogws *ws = port;

			port_id = ws->port;
			sso_port_link_modify(ws, queues[link], true);
		}
	}
	sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);

	return (int)nb_links;
}

static int
otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t port_id = 0;
	uint16_t unlink;

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = port;

			port_id = ws->port;
			sso_port_link_modify((struct otx2_ssogws *)
					&ws->ws_state[0], queues[unlink],
					false);
			sso_port_link_modify((struct otx2_ssogws *)
					&ws->ws_state[1], queues[unlink],
					false);
		} else {
			struct otx2_ssogws *ws = port;

			port_id = ws->port;
			sso_port_link_modify(ws, queues[unlink], false);
		}
	}
	sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);

	return (int)nb_unlinks;
}

static int
sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
	      uint16_t nb_lf, uint8_t attach)
{
	if (attach) {
		struct rsrc_attach_req *req;

		req = otx2_mbox_alloc_msg_attach_resources(mbox);
		switch (type) {
		case SSO_LF_GGRP:
			req->sso = nb_lf;
			break;
		case SSO_LF_GWS:
			req->ssow = nb_lf;
			break;
		default:
			return -EINVAL;
		}
		req->modify = true;
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	} else {
		struct rsrc_detach_req *req;

		req = otx2_mbox_alloc_msg_detach_resources(mbox);
		switch (type) {
		case SSO_LF_GGRP:
			req->sso = true;
			break;
		case SSO_LF_GWS:
			req->ssow = true;
			break;
		default:
			return -EINVAL;
		}
		req->partial = true;
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	}

	return 0;
}

static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
	   enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
	void *rsp;
	int rc;

	if (alloc) {
		switch (type) {
		case SSO_LF_GGRP:
			{
			struct sso_lf_alloc_req *req_ggrp;
			req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
			req_ggrp->hwgrps = nb_lf;
			}
			break;
		case SSO_LF_GWS:
			{
			struct ssow_lf_alloc_req *req_hws;
			req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
			req_hws->hws = nb_lf;
			}
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (type) {
		case SSO_LF_GGRP:
			{
			struct sso_lf_free_req *req_ggrp;
			req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
			req_ggrp->hwgrps = nb_lf;
			}
			break;
		case SSO_LF_GWS:
			{
			struct ssow_lf_free_req *req_hws;
			req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
			req_hws->hws = nb_lf;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
	if (rc < 0)
		return rc;

	if (alloc && type == SSO_LF_GGRP) {
		struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

		dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
		dev->xae_waes = rsp_ggrp->xaq_wq_entries;
		dev->iue = rsp_ggrp->in_unit_entries;
	}

	return 0;
}

static void
otx2_sso_port_release(void *port)
{
	struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
	struct otx2_sso_evdev *dev;
	int i;

	if (!gws_cookie->configured)
		goto free;

	dev = sso_pmd_priv(gws_cookie->event_dev);
	if (dev->dual_ws) {
		struct otx2_ssogws_dual *ws = port;

		for (i = 0; i < dev->nb_event_queues; i++) {
			sso_port_link_modify((struct otx2_ssogws *)
					     &ws->ws_state[0], i, false);
			sso_port_link_modify((struct otx2_ssogws *)
					     &ws->ws_state[1], i, false);
		}
		memset(ws, 0, sizeof(*ws));
	} else {
		struct otx2_ssogws *ws = port;

		for (i = 0; i < dev->nb_event_queues; i++)
			sso_port_link_modify(ws, i, false);
		memset(ws, 0, sizeof(*ws));
	}

	memset(gws_cookie, 0, sizeof(*gws_cookie));

free:
	rte_free(gws_cookie);
}

static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}

static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint16_t *links_map;
	int i, j;

	for (i = 0; i < dev->nb_event_ports; i++) {
		links_map = event_dev->data->links_map;
		/* Point links_map to this port specific area */
		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws;

			ws = event_dev->data->ports[i];
			for (j = 0; j < dev->nb_event_queues; j++) {
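				/* 0xdead marks an unlinked queue in the
				 * eventdev library's links_map.
				 */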
				if (links_map[j] == 0xdead)
					continue;
				sso_port_link_modify((struct otx2_ssogws *)
						&ws->ws_state[0], j, true);
				sso_port_link_modify((struct otx2_ssogws *)
						&ws->ws_state[1], j, true);
				sso_func_trace("Restoring port %d queue %d "
						"link", i, j);
			}
		} else {
			struct otx2_ssogws *ws;

			ws = event_dev->data->ports[i];
			for (j = 0; j < dev->nb_event_queues; j++) {
				if (links_map[j] == 0xdead)
					continue;
				sso_port_link_modify(ws, j, true);
				sso_func_trace("Restoring port %d queue %d "
						"link", i, j);
			}
		}
	}
}

static void
sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
{
	ws->tag_op		= base + SSOW_LF_GWS_TAG;
	ws->wqp_op		= base + SSOW_LF_GWS_WQP;
	ws->getwrk_op		= base + SSOW_LF_GWS_OP_GET_WORK;
	ws->swtag_flush_op	= base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	ws->swtag_norm_op	= base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op	= base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

static int
sso_configure_dual_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t vws = 0;
	uint8_t nb_lf;
	int i, rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	nb_lf = dev->nb_event_ports * 2;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct otx2_ssogws_cookie *gws_cookie;
		struct otx2_ssogws_dual *ws;
		uintptr_t base;

		if (event_dev->data->ports[i] != NULL) {
			ws = event_dev->data->ports[i];
		} else {
			/* Allocate event port memory */
			ws = rte_zmalloc_socket("otx2_sso_ws",
					sizeof(struct otx2_ssogws_dual) +
					RTE_CACHE_LINE_SIZE,
					RTE_CACHE_LINE_SIZE,
					event_dev->data->socket_id);
			if (ws == NULL) {
				otx2_err("Failed to alloc memory for port=%d",
					 i);
				rc = -ENOMEM;
				break;
			}

			/* First cache line is reserved for cookie */
			ws = (struct otx2_ssogws_dual *)
				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
		}

		ws->port = i;
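		/* Each SSOW LF exposes a 4 KB register window in BAR2 at
		 * (RVU_BLOCK_ADDR_SSOW << 20) + (lf << 12); a dual workslot
		 * port therefore claims two consecutive LF windows.
		 */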
		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
		vws++;

		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
		vws++;

		gws_cookie = ssogws_get_cookie(ws);
		gws_cookie->event_dev = event_dev;
		gws_cookie->configured = 1;

		event_dev->data->ports[i] = ws;
	}

	if (rc < 0) {
		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
	}

	return rc;
}

static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int i, rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	nb_lf = dev->nb_event_ports;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	for (i = 0; i < nb_lf; i++) {
		struct otx2_ssogws_cookie *gws_cookie;
		struct otx2_ssogws *ws;
		uintptr_t base;

		/* Free memory prior to re-allocation if needed */
		if (event_dev->data->ports[i] != NULL) {
			ws = event_dev->data->ports[i];
			rte_free(ssogws_get_cookie(ws));
			ws = NULL;
		}

		/* Allocate event port memory */
		ws = rte_zmalloc_socket("otx2_sso_ws",
					sizeof(struct otx2_ssogws) +
					RTE_CACHE_LINE_SIZE,
					RTE_CACHE_LINE_SIZE,
					event_dev->data->socket_id);
		if (ws == NULL) {
			otx2_err("Failed to alloc memory for port=%d", i);
			rc = -ENOMEM;
			break;
		}

		/* First cache line is reserved for cookie */
		ws = (struct otx2_ssogws *)
			((uint8_t *)ws + RTE_CACHE_LINE_SIZE);

		ws->port = i;
		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
		sso_set_port_ops(ws, base);

		gws_cookie = ssogws_get_cookie(ws);
		gws_cookie->event_dev = event_dev;
		gws_cookie->configured = 1;

		event_dev->data->ports[i] = ws;
	}

	if (rc < 0) {
		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
	}

	return rc;
}

static int
sso_configure_queues(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int rc;

	otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);

	nb_lf = dev->nb_event_queues;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GGRP LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
		otx2_err("Failed to init SSO GGRP LF");
		return -ENODEV;
	}

	return rc;
}

static int
sso_xaq_allocate(struct otx2_sso_evdev *dev)
{
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt;
	int rc;

	if (dev->xaq_pool)
		rte_mempool_free(dev->xaq_pool);

	/*
	 * Allocate memory for the add-work backpressure (flow control)
	 * counter.
	 */
	mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
						 OTX2_ALIGN +
						 sizeof(struct npa_aura_s),
						 rte_socket_id(),
						 RTE_MEMZONE_IOVA_CONTIG,
						 OTX2_ALIGN);
	if (mz == NULL) {
		otx2_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;

	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */
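	/* With fc_ena set, NPA mirrors the aura's free-buffer count into
	 * fc_mem; the enqueue path compares that count against xaq_lmt
	 * (computed below) to detect XAQ exhaustion.
	 */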

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
	if (dev->xae_cnt)
		xaq_cnt += dev->xae_cnt / dev->xae_waes;
	else if (dev->adptr_xae_cnt)
		xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
	else
		xaq_cnt += (dev->iue / dev->xae_waes) +
			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);

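	/* Worked example for the xae_cnt devargs path, with hypothetical
	 * values: 8 event queues, a cache count of 64 and xae_cnt=8192 with
	 * 16 work entries per XAQ give xaq_cnt = 8 * 64 + 8192 / 16 = 1024.
	 */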
	otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Set up the XAQ pool based on the number of event queues. */
	snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
			xaq_cnt, dev->xaq_buf_size, 0, 0,
			rte_socket_id(), 0);

	if (dev->xaq_pool == NULL) {
		otx2_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		otx2_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		otx2_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}
	reconfig_cnt++;
	/* When SW does addwork (enqueue) check if there is space in XAQ by
	 * comparing fc_addr above against the xaq_lmt calculated below.
	 * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
	 * to request XAQ to cache them even before enqueue is called.
	 */
	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
				  dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	return 0;
alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(mz);
	return rc;
}

static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_hw_setconfig *req;

	otx2_sso_dbg("Configuring XAQ for GGRPs");
	req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
	req->hwgrps = dev->nb_event_queues;

	return otx2_mbox_process(mbox);
}

static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
		enum otx2_sso_lf_type lf_type)
{
	uint8_t nb_lf;

	switch (lf_type) {
	case SSO_LF_GGRP:
		nb_lf = dev->nb_event_queues;
		break;
	case SSO_LF_GWS:
		nb_lf = dev->nb_event_ports;
		nb_lf *= dev->dual_ws ? 2 : 1;
		break;
	default:
		return;
	}

	sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
	sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
}

static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	sso_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;

	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		otx2_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		otx2_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		otx2_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		otx2_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->configured)
		sso_unregister_irqs(event_dev);

	if (dev->nb_event_queues) {
		/* Tear down any previously configured queues. */
		sso_lf_teardown(dev, SSO_LF_GGRP);
	}
	if (dev->nb_event_ports) {
		/* Tear down any previously configured ports. */
		sso_lf_teardown(dev, SSO_LF_GWS);
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	if (dev->dual_ws)
		rc = sso_configure_dual_ports(event_dev);
	else
		rc = sso_configure_ports(event_dev);

	if (rc < 0) {
		otx2_err("Failed to configure event ports");
		return -ENODEV;
	}

	if (sso_configure_queues(event_dev) < 0) {
		otx2_err("Failed to configure event queues");
		rc = -ENODEV;
		goto teardown_hws;
	}

	if (sso_xaq_allocate(dev) < 0) {
		rc = -ENOMEM;
		goto teardown_hwggrp;
	}

	/* Restore any prior port-queue mapping. */
	sso_restore_links(event_dev);
	rc = sso_ggrp_alloc_xaq(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq to ggrp %d", rc);
		goto teardown_hwggrp;
	}

	rc = sso_get_msix_offsets(event_dev);
	if (rc < 0) {
		otx2_err("Failed to get msix offsets %d", rc);
		goto teardown_hwggrp;
	}

	rc = sso_register_irqs(event_dev);
	if (rc < 0) {
		otx2_err("Failed to register irq %d", rc);
		goto teardown_hwggrp;
	}

	dev->configured = 1;
	rte_mb();

	return 0;
teardown_hwggrp:
	sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;
	dev->configured = 0;
	return rc;
}

static void
otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_grp_priority *req;
	int rc;

	sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);

	req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
	req->grp = queue_id;
	req->weight = 0xFF;
	req->affinity = 0xFF;
	/* Normalize <0-255> to <0-7> */
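	/* e.g. RTE_EVENT_DEV_PRIORITY_NORMAL (128) maps to 128 / 32 = 4 */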
	req->priority = queue_conf->priority / 32;

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to set priority queue=%d", queue_id);
		return rc;
	}

	return 0;
}

static void
otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
	uint64_t val;
	uint16_t q;

	sso_func_trace("Port=%d", port_id);
	RTE_SET_USED(port_conf);

	if (event_dev->data->ports[port_id] == NULL) {
		otx2_err("Invalid port Id %d", port_id);
		return -EINVAL;
	}

	for (q = 0; q < dev->nb_event_queues; q++) {
		grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
		if (grps_base[q] == 0) {
			otx2_err("Failed to get grp[%d] base addr", q);
			return -EINVAL;
		}
	}

	/* Set get_work timeout for HWS */
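	/* The timeout is converted to microseconds and decremented by one,
	 * NW_TIM being treated here as a zero-based microsecond count.
	 */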
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;

	if (dev->dual_ws) {
		struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];

		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;
		ws->tstamp = dev->tstamp;
		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
			     ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
			     ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
	} else {
		struct otx2_ssogws *ws = event_dev->data->ports[port_id];
		uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;
		ws->tstamp = dev->tstamp;
		otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
	}

	otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

	return 0;
}

static int
otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
		       uint64_t *tmo_ticks)
{
	RTE_SET_USED(event_dev);
	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

	return 0;
}

static void
ssogws_dump(struct otx2_ssogws *ws, FILE *f)
{
	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

	fprintf(f, "SSOW_LF_GWS Base addr   0x%" PRIx64 "\n", (uint64_t)base);
	fprintf(f, "SSOW_LF_GWS_LINKS       0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_LINKS));
	fprintf(f, "SSOW_LF_GWS_PENDWQP     0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_PENDWQP));
	fprintf(f, "SSOW_LF_GWS_PENDSTATE   0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
	fprintf(f, "SSOW_LF_GWS_NW_TIM      0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_NW_TIM));
	fprintf(f, "SSOW_LF_GWS_TAG         0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_TAG));
	fprintf(f, "SSOW_LF_GWS_WQP         0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_WQP));
	fprintf(f, "SSOW_LF_GWS_SWTP        0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_SWTP));
	fprintf(f, "SSOW_LF_GWS_PENDTAG     0x%" PRIx64 "\n",
		otx2_read64(base + SSOW_LF_GWS_PENDTAG));
}

static void
ssoggrp_dump(uintptr_t base, FILE *f)
{
	fprintf(f, "SSO_LF_GGRP Base addr   0x%" PRIx64 "\n", (uint64_t)base);
	fprintf(f, "SSO_LF_GGRP_QCTL        0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_QCTL));
	fprintf(f, "SSO_LF_GGRP_XAQ_CNT     0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
	fprintf(f, "SSO_LF_GGRP_INT_THR     0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_INT_THR));
	fprintf(f, "SSO_LF_GGRP_INT_CNT     0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_INT_CNT));
	fprintf(f, "SSO_LF_GGRP_AQ_CNT      0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
	fprintf(f, "SSO_LF_GGRP_AQ_THR      0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_AQ_THR));
	fprintf(f, "SSO_LF_GGRP_MISC_CNT    0x%" PRIx64 "\n",
		otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
}

static void
otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t queue;
	uint8_t port;

	fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
		"dual_ws" : "single_ws");
	/* Dump SSOW registers */
	for (port = 0; port < dev->nb_event_ports; port++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws =
				event_dev->data->ports[port];

			fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
				__func__, port, 0);
			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
			fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
				__func__, port, 1);
			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
		} else {
			fprintf(f, "[%s] SSO single workslot[%d] dump\n",
				__func__, port);
			ssogws_dump(event_dev->data->ports[port], f);
		}
	}

	/* Dump SSO registers */
	for (queue = 0; queue < dev->nb_event_queues; queue++) {
		fprintf(f, "[%s] SSO group[%d] dump\n", __func__, queue);
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
			ssoggrp_dump(ws->grps_base[queue], f);
		} else {
			struct otx2_ssogws *ws = event_dev->data->ports[0];
			ssoggrp_dump(ws->grps_base[queue], f);
		}
	}
}

static void
otx2_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *event_dev = arg;

	if (event_dev->dev_ops->dev_stop_flush != NULL)
		event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
				event, event_dev->data->dev_stop_flush_arg);
}

static void
sso_qos_cfg(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct sso_grp_qos_cfg *req;
	uint16_t i;

	for (i = 0; i < dev->qos_queue_cnt; i++) {
		uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
		uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
		uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;

		if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
			continue;

		req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
		req->xaq_limit = (dev->nb_xaq_cfg *
				  (xaq_prcnt ? xaq_prcnt : 100)) / 100;
		req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
				(taq_prcnt ? taq_prcnt : 100)) / 100;
		req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
				(iaq_prcnt ? iaq_prcnt : 100)) / 100;
	}

	if (dev->qos_queue_cnt)
		otx2_mbox_process(dev->mbox);
}

static void
sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint16_t i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct otx2_ssogws_dual *ws;

			ws = event_dev->data->ports[i];
			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
			ws->swtag_req = 0;
			ws->vws = 0;
			ws->ws_state[0].cur_grp = 0;
			ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
			ws->ws_state[1].cur_grp = 0;
			ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
		} else {
			struct otx2_ssogws *ws;

			ws = event_dev->data->ports[i];
			ssogws_reset(ws);
			ws->swtag_req = 0;
			ws->cur_grp = 0;
			ws->cur_tt = SSO_SYNC_EMPTY;
		}
	}

	rte_mb();
	if (dev->dual_ws) {
		struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
		struct otx2_ssogws temp_ws;

		memcpy(&temp_ws, &ws->ws_state[0],
		       sizeof(struct otx2_ssogws_state));
		for (i = 0; i < dev->nb_event_queues; i++) {
			/* Consume all the events through HWS0 */
			ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
					    otx2_handle_event, event_dev);
			/* Enable/Disable SSO GGRP */
			otx2_write64(enable, ws->grps_base[i] +
				     SSO_LF_GGRP_QCTL);
		}
		ws->ws_state[0].cur_grp = 0;
		ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
	} else {
		struct otx2_ssogws *ws = event_dev->data->ports[0];

		for (i = 0; i < dev->nb_event_queues; i++) {
			/* Consume all the events through HWS0 */
			ssogws_flush_events(ws, i, ws->grps_base[i],
					    otx2_handle_event, event_dev);
			/* Enable/Disable SSO GGRP */
			otx2_write64(enable, ws->grps_base[i] +
				     SSO_LF_GGRP_QCTL);
		}
		ws->cur_grp = 0;
		ws->cur_tt = SSO_SYNC_EMPTY;
	}

	/* Reset SSO GWS cache */
	otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
	otx2_mbox_process(dev->mbox);
}

int
sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct rte_mempool *prev_xaq_pool;
	int rc = 0;

	if (event_dev->data->dev_started)
		sso_cleanup(event_dev, 0);

	prev_xaq_pool = dev->xaq_pool;
	dev->xaq_pool = NULL;
	rc = sso_xaq_allocate(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq pool %d", rc);
		rte_mempool_free(prev_xaq_pool);
		return rc;
	}
	rc = sso_ggrp_alloc_xaq(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq to ggrp %d", rc);
		rte_mempool_free(prev_xaq_pool);
		return rc;
	}

	rte_mempool_free(prev_xaq_pool);
	rte_mb();
	if (event_dev->data->dev_started)
		sso_cleanup(event_dev, 1);

	return 0;
}

static int
otx2_sso_start(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	sso_qos_cfg(event_dev);
	sso_cleanup(event_dev, 1);
	sso_fastpath_fns_set(event_dev);

	return 0;
}

static void
otx2_sso_stop(struct rte_eventdev *event_dev)
{
	sso_func_trace();
	sso_cleanup(event_dev, 0);
	rte_mb();
}

static int
otx2_sso_close(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t i;

	if (!dev->configured)
		return 0;

	sso_unregister_irqs(event_dev);

	for (i = 0; i < dev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < dev->nb_event_ports; i++)
		otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
				     all_queues, dev->nb_event_queues);

	sso_lf_teardown(dev, SSO_LF_GGRP);
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_ports = 0;
	dev->nb_event_queues = 0;
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));

	return 0;
}

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops otx2_sso_ops = {
	.dev_infos_get    = otx2_sso_info_get,
	.dev_configure    = otx2_sso_configure,
	.queue_def_conf   = otx2_sso_queue_def_conf,
	.queue_setup      = otx2_sso_queue_setup,
	.queue_release    = otx2_sso_queue_release,
	.port_def_conf    = otx2_sso_port_def_conf,
	.port_setup       = otx2_sso_port_setup,
	.port_release     = otx2_sso_port_release,
	.port_link        = otx2_sso_port_link,
	.port_unlink      = otx2_sso_port_unlink,
	.timeout_ticks    = otx2_sso_timeout_ticks,

	.eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = otx2_sso_rx_adapter_start,
	.eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,

	.eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,

	.timer_adapter_caps_get = otx2_tim_caps_get,

	.crypto_adapter_caps_get = otx2_ca_caps_get,
	.crypto_adapter_queue_pair_add = otx2_ca_qp_add,
	.crypto_adapter_queue_pair_del = otx2_ca_qp_del,

	.xstats_get       = otx2_sso_xstats_get,
	.xstats_reset     = otx2_sso_xstats_reset,
	.xstats_get_names = otx2_sso_xstats_get_names,

	.dump             = otx2_sso_dump,
	.dev_start        = otx2_sso_start,
	.dev_stop         = otx2_sso_stop,
	.dev_close        = otx2_sso_close,
	.dev_selftest     = otx2_sso_selftest,
};

#define OTX2_SSO_XAE_CNT	"xae_cnt"
#define OTX2_SSO_SINGLE_WS	"single_ws"
#define OTX2_SSO_GGRP_QOS	"qos"
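
/* Example devargs (hypothetical BDF):
 *   -a 0002:0e:00.0,xae_cnt=8192,single_ws=1,qos=[1-50-50-50]
 * See RTE_PMD_REGISTER_PARAM_STRING() at the end of this file.
 */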

static void
parse_queue_param(char *value, void *opaque)
{
	struct otx2_sso_qos queue_qos = {0};
	uint8_t *val = (uint8_t *)&queue_qos;
	struct otx2_sso_evdev *dev = opaque;
	char *tok = strtok(value, "-");
	struct otx2_sso_qos *old_ptr;

	if (!strlen(value))
		return;

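	/* Parse "Q-XAQ-TAQ-IAQ" byte-wise into queue_qos; this assumes
	 * struct otx2_sso_qos lays out the queue and the xaq/taq/iaq
	 * percentage fields as consecutive single-byte members, which the
	 * bounds check below also relies on.
	 */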
	while (tok != NULL) {
		*val = atoi(tok);
		tok = strtok(NULL, "-");
		val++;
	}

	if (val != (&queue_qos.iaq_prcnt + 1)) {
		otx2_err("Invalid QoS parameter, expected [Qx-XAQ-TAQ-IAQ]");
		return;
	}

	dev->qos_queue_cnt++;
	old_ptr = dev->qos_parse_data;
	dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
					  sizeof(struct otx2_sso_qos) *
					  dev->qos_queue_cnt, 0);
	if (dev->qos_parse_data == NULL) {
		dev->qos_parse_data = old_ptr;
		dev->qos_queue_cnt--;
		return;
	}
	dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	if (s == NULL)
		return;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start && start < end) {
			*end = 0;
			parse_queue_param(start + 1, opaque);
			s = end;
			start = end;
		}
		s++;
	}

	free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format is [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; '-' is used as
	 * the separator because ',' isn't allowed. Everything is expressed
	 * in percentages, and 0 represents the default.
	 */
	parse_qos_list(value, opaque);

	return 0;
}

static void
sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	uint8_t single_ws = 0;

	if (devargs == NULL)
		return;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
			   &dev->xae_cnt);
	rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
			   &single_ws);
	rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
			   dev);
	otx2_parse_common_devargs(kvlist);
	dev->dual_ws = !single_ws;
	rte_kvargs_free(kvlist);
}

static int
otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct otx2_sso_evdev),
				       otx2_sso_init);
}

static int
otx2_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
}

static const struct rte_pci_id pci_sso_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_sso = {
	.id_table = pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = otx2_sso_probe,
	.remove = otx2_sso_remove,
};

int
otx2_sso_init(struct rte_eventdev *event_dev)
{
	struct free_rsrcs_rsp *rsrc_cnt;
	struct rte_pci_device *pci_dev;
	struct otx2_sso_evdev *dev;
	int rc;

	event_dev->dev_ops = &otx2_sso_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sso_fastpath_fns_set(event_dev);
		return 0;
	}

	dev = sso_pmd_priv(event_dev);

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Failed to initialize otx2_dev rc=%d", rc);
		goto error;
	}

	/* Get SSO and SSOW MSIX rsrc cnt */
	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count");
		goto otx2_dev_uninit;
	}
	otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
		     rsrc_cnt->ssow, rsrc_cnt->npa);

	dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
	dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Unable to init NPA lf. It might not be provisioned");
		goto otx2_dev_uninit;
	}

	dev->drv_inited = true;
	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	if (!dev->max_event_ports || !dev->max_event_queues) {
		otx2_err("Not enough eventdev resource queues=%d ports=%d",
			 dev->max_event_queues, dev->max_event_ports);
		rc = -ENODEV;
		goto otx2_npa_lf_uninit;
	}

	dev->dual_ws = 1;
	sso_parse_devargs(dev, pci_dev->device.devargs);
	if (dev->dual_ws) {
		otx2_sso_dbg("Using dual workslot mode");
		dev->max_event_ports = dev->max_event_ports / 2;
	} else {
		otx2_sso_dbg("Using single workslot mode");
	}

	otx2_sso_pf_func_set(dev->pf_func);
	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		     event_dev->data->name, dev->max_event_queues,
		     dev->max_event_ports);

	otx2_tim_init(pci_dev, (struct otx2_dev *)dev);

	return 0;

otx2_npa_lf_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	return rc;
}

int
otx2_sso_fini(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct rte_pci_device *pci_dev;

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	if (!dev->drv_inited)
		goto dev_fini;

	dev->drv_inited = false;
	otx2_npa_lf_fini();

dev_fini:
	if (otx2_npa_lf_active(dev)) {
		otx2_info("Common resource in use by other devices");
		return -EAGAIN;
	}

	otx2_tim_fini();
	otx2_dev_fini(pci_dev, dev);

	return 0;
}

RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
			      OTX2_SSO_SINGLE_WS "=1"
			      OTX2_SSO_GGRP_QOS "=<string>"
			      OTX2_NPA_LOCK_MASK "=<1-65535>");