/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_worker.h"

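/*
 * Enqueue a RTE_EVENT_OP_NEW event through the SSO add-work interface.
 * Returns 0 (event dropped) when the in-flight XAQ entry count tracked in
 * *ws->fc_mem has reached the xaq_lmt flow-control threshold, 1 on success.
 */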
static __rte_noinline uint8_t
otx2_ssogws_new_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        otx2_ssogws_add_work(ws, event_ptr, tag, new_tt, grp);

        return 1;
}

static __rte_always_inline void
otx2_ssogws_fwd_swtag(struct otx2_ssogws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = ws->cur_tt;

        /* 96XX model
         * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
         *
         * SSO_SYNC_ORDERED        norm             norm             untag
         * SSO_SYNC_ATOMIC         norm             norm             untag
         * SSO_SYNC_UNTAGGED       norm             norm             NOOP
         */

        if (new_tt == SSO_SYNC_UNTAGGED) {
                if (cur_tt != SSO_SYNC_UNTAGGED)
                        otx2_ssogws_swtag_untag(ws);
        } else {
                otx2_ssogws_swtag_norm(ws, tag, new_tt);
        }

        ws->swtag_req = 1;
}

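/*
 * Forward an event to a different group: update the work-queue pointer for
 * the new group, then deschedule with a tag switch. The store barrier
 * ensures the WQP/GRP update is visible before the deschedule is issued.
 */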
static __rte_always_inline void
otx2_ssogws_fwd_group(struct otx2_ssogws *ws, const struct rte_event *ev,
                      const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        rte_smp_wmb();
        otx2_ssogws_swtag_desched(ws, tag, new_tt, grp);
}

static __rte_always_inline void
otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (ws->cur_grp == grp)
                otx2_ssogws_fwd_swtag(ws, ev);
        else
                /*
                 * Group has been changed for group based work pipelining,
                 * use the deschedule/add_work operation to transfer the
                 * event to the new group/core.
                 */
                otx2_ssogws_fwd_group(ws, ev, grp);
}

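/* Release the tag held by the current event via a tag-switch flush. */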
static __rte_always_inline void
otx2_ssogws_release_event(struct otx2_ssogws *ws)
{
        otx2_ssogws_swtag_flush(ws);
}

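/*
 * The R() expansions below generate the single-event, burst, timeout and
 * multi-segment dequeue variants for each Rx offload flag combination
 * listed in SSO_RX_ADPTR_ENQ_FASTPATH_FUNC. A SWTAG left pending by a
 * prior forward (ws->swtag_req) is completed before new work is fetched.
 */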
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                       \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev,                \
                        uint64_t timeout_ticks)                          \
{                                                                         \
        struct otx2_ssogws *ws = port;                                   \
                                                                          \
        RTE_SET_USED(timeout_ticks);                                     \
                                                                          \
        if (ws->swtag_req) {                                             \
                ws->swtag_req = 0;                                       \
                otx2_ssogws_swtag_wait(ws);                              \
                return 1;                                                \
        }                                                                \
                                                                          \
        return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem);      \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[],         \
                              uint16_t nb_events,                        \
                              uint64_t timeout_ticks)                    \
{                                                                         \
        RTE_SET_USED(nb_events);                                         \
                                                                          \
        return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks);         \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev,        \
                                uint64_t timeout_ticks)                  \
{                                                                         \
        struct otx2_ssogws *ws = port;                                   \
        uint16_t ret = 1;                                                \
        uint64_t iter;                                                   \
                                                                          \
        if (ws->swtag_req) {                                             \
                ws->swtag_req = 0;                                       \
                otx2_ssogws_swtag_wait(ws);                              \
                return ret;                                              \
        }                                                                \
                                                                          \
        ret = otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem);       \
        for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)       \
                ret = otx2_ssogws_get_work(ws, ev, flags,                \
                                           ws->lookup_mem);              \
                                                                          \
        return ret;                                                      \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \
                                      uint16_t nb_events,                \
                                      uint64_t timeout_ticks)            \
{                                                                         \
        RTE_SET_USED(nb_events);                                         \
                                                                          \
        return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks); \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev,            \
                            uint64_t timeout_ticks)                      \
{                                                                         \
        struct otx2_ssogws *ws = port;                                   \
                                                                          \
        RTE_SET_USED(timeout_ticks);                                     \
                                                                          \
        if (ws->swtag_req) {                                             \
                ws->swtag_req = 0;                                       \
                otx2_ssogws_swtag_wait(ws);                              \
                return 1;                                                \
        }                                                                \
                                                                          \
        return otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F,  \
                                    ws->lookup_mem);                     \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[],     \
                                  uint16_t nb_events,                    \
                                  uint64_t timeout_ticks)                \
{                                                                         \
        RTE_SET_USED(nb_events);                                         \
                                                                          \
        return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks);     \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev,    \
                                    uint64_t timeout_ticks)              \
{                                                                         \
        struct otx2_ssogws *ws = port;                                   \
        uint16_t ret = 1;                                                \
        uint64_t iter;                                                   \
                                                                          \
        if (ws->swtag_req) {                                             \
                ws->swtag_req = 0;                                       \
                otx2_ssogws_swtag_wait(ws);                              \
                return ret;                                              \
        }                                                                \
                                                                          \
        ret = otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F,   \
                                   ws->lookup_mem);                      \
        for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)       \
                ret = otx2_ssogws_get_work(ws, ev,                       \
                                           flags | NIX_RX_MULTI_SEG_F,   \
                                           ws->lookup_mem);              \
                                                                          \
        return ret;                                                      \
}                                                                         \
                                                                          \
uint16_t __rte_hot                                                        \
otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port,                    \
                                          struct rte_event ev[],         \
                                          uint16_t nb_events,            \
                                          uint64_t timeout_ticks)        \
{                                                                         \
        RTE_SET_USED(nb_events);                                         \
                                                                          \
        return otx2_ssogws_deq_seg_timeout_ ##name(port, ev,             \
                                                   timeout_ticks);       \
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

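/*
 * Single-event enqueue: dispatch on the event op type to the new/forward/
 * release helpers above. Returns 0 on an unknown op or when a new event
 * is dropped by XAQ flow control.
 */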
uint16_t __rte_hot
otx2_ssogws_enq(void *port, const struct rte_event *ev)
{
        struct otx2_ssogws *ws = port;

        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
                rte_smp_mb();
                return otx2_ssogws_new_event(ws, ev);
        case RTE_EVENT_OP_FORWARD:
                otx2_ssogws_forward_event(ws, ev);
                break;
        case RTE_EVENT_OP_RELEASE:
                otx2_ssogws_release_event(ws);
                break;
        default:
                return 0;
        }

        return 1;
}

uint16_t __rte_hot
otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
                      uint16_t nb_events)
{
        RTE_SET_USED(nb_events);
        return otx2_ssogws_enq(port, ev);
}

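/*
 * Burst variant for RTE_EVENT_OP_NEW only: a single flow-control check
 * gates the whole burst, after which events are added one by one until
 * otx2_ssogws_new_event() reports a drop.
 */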
uint16_t __rte_hot
otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
                          uint16_t nb_events)
{
        struct otx2_ssogws *ws = port;
        uint16_t i, rc = 1;

        rte_smp_mb();
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        for (i = 0; i < nb_events && rc; i++)
                rc = otx2_ssogws_new_event(ws, &ev[i]);

        return nb_events;
}

uint16_t __rte_hot
otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
                          uint16_t nb_events)
{
        struct otx2_ssogws *ws = port;

        RTE_SET_USED(nb_events);
        otx2_ssogws_forward_event(ws, ev);

        return 1;
}

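/*
 * The T() expansions below generate the Tx event-adapter enqueue functions
 * for each Tx offload flag combination listed in
 * SSO_TX_ADPTR_ENQ_FASTPATH_FUNC; sz is the size of the per-packet command
 * buffer in 64-bit words.
 */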
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                   \
uint16_t __rte_hot                                                        \
otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],     \
                                  uint16_t nb_events)                    \
{                                                                         \
        struct otx2_ssogws *ws = port;                                   \
        uint64_t cmd[sz];                                                \
        int i;                                                           \
                                                                          \
        for (i = 0; i < nb_events; i++)                                  \
                otx2_ssogws_event_tx(ws, &ev[i], cmd, (const uint64_t    \
                                     (*)[RTE_MAX_QUEUES_PER_PORT])       \
                                     &ws->tx_adptr_data,                 \
                                     flags);                             \
        return nb_events;                                                \
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

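/*
 * Multi-segment variants: the command buffer is enlarged to hold the
 * scatter-gather descriptors and NIX_TX_MULTI_SEG_F is OR'd into the
 * offload flags.
 */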
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                    \
uint16_t __rte_hot                                                         \
otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],  \
                                      uint16_t nb_events)                 \
{                                                                          \
        uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];                   \
        struct otx2_ssogws *ws = port;                                    \
        int i;                                                            \
                                                                           \
        for (i = 0; i < nb_events; i++)                                   \
                otx2_ssogws_event_tx(ws, &ev[i], cmd, (const uint64_t     \
                                     (*)[RTE_MAX_QUEUES_PER_PORT])        \
                                     &ws->tx_adptr_data,                  \
                                     (flags) | NIX_TX_MULTI_SEG_F);       \
        return nb_events;                                                 \
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

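/*
 * Drain all work pending on the given event queue (GGRP): issue grouped
 * GETWORK requests against that queue until its AQ_CNT, MISC_CNT and the
 * CQ/DS fields of INT_CNT all read zero, handing each flushed event to
 * fn (when non-NULL) and flushing any tag it holds.
 */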
void
ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
                    otx2_handle_event_t fn, void *arg)
{
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uint64_t enable;
        uint64_t val;

        enable = otx2_read64(base + SSO_LF_GGRP_QCTL);
        if (!enable)
                return;

        val = queue_id;         /* GGRP ID */
        val |= BIT_ULL(18);     /* Grouped */
        val |= BIT_ULL(16);     /* WAIT */

        aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                otx2_write64(val, ws->getwrk_op);
                otx2_ssogws_get_work_empty(ws, &ev, 0);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        otx2_ssogws_swtag_flush(ws);
                rte_mb();
                aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                     SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

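/*
 * Quiesce a GWS (work slot): wait for any in-flight getwork/swtag/desched
 * operation to retire, untag and deschedule any event still held by the
 * slot, then wait for that deschedule to complete.
 */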
void
ssogws_reset(struct otx2_ssogws *ws)
{
        uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
        uint64_t pend_state;
        uint8_t pend_tt;
        uint64_t tag;

        /* Wait till getwork/swtp/waitw/desched completes. */
        do {
                pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
                rte_mb();
        } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58)));

        tag = otx2_read64(base + SSOW_LF_GWS_TAG);
        pend_tt = (tag >> 32) & 0x3;
        if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                if (pend_tt == SSO_SYNC_ATOMIC || pend_tt == SSO_SYNC_ORDERED)
                        otx2_ssogws_swtag_untag(ws);
                otx2_ssogws_desched(ws);
        }
        rte_mb();

        /* Wait for desched to complete. */
        do {
                pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
                rte_mb();
        } while (pend_state & BIT_ULL(58));
}