/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_WORKER_H__
#define __OTX2_WORKER_H__

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <otx2_common.h>
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_dp.h"
#include "otx2_ethdev_sec_tx.h"

/* SSO Operations */

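/* Request new work from the SSO and wait for it: the tag word is polled
 * until the GET_WORK response is ready (bit 63 clears), the HW tag layout
 * is rearranged into the rte_event format and ethdev/cryptodev work is
 * translated to its mbuf/crypto-op representation. Returns non-zero when
 * a valid work pointer was received.
 */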
static __rte_always_inline uint16_t
otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
		     const uint32_t flags, const void * const lookup_mem)
{
	union otx2_sso_event event;
	uint64_t tstamp_ptr;
	uint64_t get_work1;
	uint64_t mbuf;

	otx2_write64(BIT_ULL(16) | /* wait for work. */
		     1, /* Use Mask set 0. */
		     ws->getwrk_op);

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(
		"	ldr %[tag], [%[tag_loc]]	\n"
		"	ldr %[wqp], [%[wqp_loc]]	\n"
		"	tbz %[tag], 63, done%=		\n"
		"	sevl				\n"
		"rty%=:	wfe				\n"
		"	ldr %[tag], [%[tag_loc]]	\n"
		"	ldr %[wqp], [%[wqp_loc]]	\n"
		"	tbnz %[tag], 63, rty%=		\n"
		"done%=: dmb ld				\n"
		"	prfm pldl1keep, [%[wqp], #8]	\n"
		"	sub %[mbuf], %[wqp], #0x80	\n"
		"	prfm pldl1keep, [%[mbuf]]	\n"
		: [tag] "=&r" (event.get_work0),
		  [wqp] "=&r" (get_work1),
		  [mbuf] "=&r" (mbuf)
		: [tag_loc] "r" (ws->tag_op),
		  [wqp_loc] "r" (ws->wqp_op)
		);
#else
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch0((const void *)get_work1);
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch0((const void *)mbuf);
#endif

	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	if (event.sched_type != SSO_TT_EMPTY) {
		if ((flags & NIX_RX_OFFLOAD_SECURITY_F) &&
		    (event.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			get_work1 = otx2_handle_crypto_event(get_work1);
		} else if (event.event_type == RTE_EVENT_TYPE_ETHDEV) {
			otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
					 (uint32_t) event.get_work0, flags,
					 lookup_mem);
			/* Extracting tstamp, if PTP enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
						    get_work1) +
						   OTX2_SSO_WQE_SG_PTR);
			otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp, flags,
						(uint64_t *)tstamp_ptr);
			get_work1 = mbuf;
		}
	}

	ev->event = event.get_work0;
	ev->u64 = get_work1;

	return !!get_work1;
}

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
			   const uint32_t flags)
{
	union otx2_sso_event event;
	uint64_t tstamp_ptr;
	uint64_t get_work1;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(
		"	ldr %[tag], [%[tag_loc]]	\n"
		"	ldr %[wqp], [%[wqp_loc]]	\n"
		"	tbz %[tag], 63, done%=		\n"
		"	sevl				\n"
		"rty%=:	wfe				\n"
		"	ldr %[tag], [%[tag_loc]]	\n"
		"	ldr %[wqp], [%[wqp_loc]]	\n"
		"	tbnz %[tag], 63, rty%=		\n"
		"done%=: dmb ld				\n"
		"	prfm pldl1keep, [%[wqp], #8]	\n"
		"	sub %[mbuf], %[wqp], #0x80	\n"
		"	prfm pldl1keep, [%[mbuf]]	\n"
		: [tag] "=&r" (event.get_work0),
		  [wqp] "=&r" (get_work1),
		  [mbuf] "=&r" (mbuf)
		: [tag_loc] "r" (ws->tag_op),
		  [wqp_loc] "r" (ws->wqp_op)
		);
#else
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch_non_temporal((const void *)get_work1);
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch_non_temporal((const void *)mbuf);
#endif

	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	if (event.sched_type != SSO_TT_EMPTY &&
	    event.event_type == RTE_EVENT_TYPE_ETHDEV) {
		otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
				 (uint32_t) event.get_work0, flags, NULL);
		/* Extracting tstamp, if PTP enabled */
		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
					   + OTX2_SSO_WQE_SG_PTR);
		otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
					flags, (uint64_t *)tstamp_ptr);
		get_work1 = mbuf;
	}

	ev->event = event.get_work0;
	ev->u64 = get_work1;

	return !!get_work1;
}

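/* Add work (tag, tag type and event pointer) to the given SSO group. */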
static __rte_always_inline void
otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
		     const uint32_t tag, const uint8_t new_tt,
		     const uint16_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
}

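/* Switch the tag of the held work and deschedule it to the given group. */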
static __rte_always_inline void
otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
			  uint16_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	otx2_write64(val, ws->swtag_desched_op);
}

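/* Switch the tag and tag type of the work currently held by the workslot. */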
static __rte_always_inline void
otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	otx2_write64(val, ws->swtag_norm_op);
}

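/* Drop the current tag, leaving the held work untagged. */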
static __rte_always_inline void
otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
{
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}

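/* Complete (flush) the tag held by the workslot; a no-op when the current
 * tag type is already EMPTY.
 */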
static __rte_always_inline void
otx2_ssogws_swtag_flush(struct otx2_ssogws *ws)
{
	if (OTX2_SSOW_TT_FROM_TAG(otx2_read64(ws->tag_op)) == SSO_TT_EMPTY) {
		ws->cur_tt = SSO_SYNC_EMPTY;
		return;
	}
	otx2_write64(0, ws->swtag_flush_op);
	ws->cur_tt = SSO_SYNC_EMPTY;
}

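/* Deschedule the work currently held by the workslot. */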
static __rte_always_inline void
otx2_ssogws_desched(struct otx2_ssogws *ws)
{
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_DESCHED);
}

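/* Wait for a pending tag switch (bit 62 of the tag word) to complete. */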
static __rte_always_inline void
otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
{
#ifdef RTE_ARCH_ARM64
	uint64_t swtp;

	asm volatile("	ldr %[swtb], [%[swtp_loc]]	\n"
		     "	tbz %[swtb], 62, done%=		\n"
		     "	sevl				\n"
		     "rty%=: wfe			\n"
		     "	ldr %[swtb], [%[swtp_loc]]	\n"
		     "	tbnz %[swtb], 62, rty%=		\n"
		     "done%=:				\n"
		     : [swtb] "=&r" (swtp)
		     : [swtp_loc] "r" (ws->tag_op));
#else
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (otx2_read64(ws->tag_op) & BIT_ULL(62))
		;
#endif
}

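/* Wait until the HEAD bit (35) of the tag word is set; used before ordered
 * Tx submission.
 */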
static __rte_always_inline void
otx2_ssogws_head_wait(struct otx2_ssogws *ws)
{
#ifdef RTE_ARCH_ARM64
	uint64_t tag;

	asm volatile (
		"	ldr %[tag], [%[tag_op]]		\n"
		"	tbnz %[tag], 35, done%=		\n"
		"	sevl				\n"
		"rty%=:	wfe				\n"
		"	ldr %[tag], [%[tag_op]]		\n"
		"	tbz %[tag], 35, rty%=		\n"
		"done%=:				\n"
		: [tag] "=&r" (tag)
		: [tag_op] "r" (ws->tag_op)
		);
#else
	/* Wait for the HEAD to be set */
	while (!(otx2_read64(ws->tag_op) & BIT_ULL(35)))
		;
#endif
}

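/* Fetch the Tx queue selected by the event eth Tx adapter for this mbuf. */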
static __rte_always_inline const struct otx2_eth_txq *
otx2_ssogws_xtract_meta(struct rte_mbuf *m,
			const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct otx2_eth_txq *)txq_data[m->port][
			rte_event_eth_tx_adapter_txq_get(m)];
}

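/* Copy the queue's command template and build the send descriptor for m. */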
static __rte_always_inline void
otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
			uint64_t *cmd, const uint32_t flags)
{
	otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
	otx2_nix_xmit_prepare(m, cmd, flags);
}

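/* Transmit the mbuf carried by an event, used by the event eth Tx adapter.
 * When ev->sched_type is 0 (RTE_SCHED_TYPE_ORDERED), the LMT submission is
 * done only after otx2_ssogws_head_wait().
 */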
static __rte_always_inline uint16_t
otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,
		     uint64_t *cmd,
		     const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		     const uint32_t flags)
{
	struct rte_mbuf *m = ev->mbuf;
	const struct otx2_eth_txq *txq;
	uint16_t ref_cnt = m->refcnt;

	if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
	    (m->ol_flags & PKT_TX_SEC_OFFLOAD)) {
		txq = otx2_ssogws_xtract_meta(m, txq_data);
		return otx2_sec_event_tx(ws, ev, m, txq, flags);
	}

	/* Perform header writes before barrier for TSO */
	otx2_nix_xmit_prepare_tso(m, flags);
	/* Commit any changes to the packet here when fast free is set,
	 * since no further changes will be made to the mbuf. When fast
	 * free is not set, both otx2_nix_prepare_mseg() and
	 * otx2_nix_xmit_prepare() have a barrier after the refcnt update.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();
	txq = otx2_ssogws_xtract_meta(m, txq_data);
	otx2_ssogws_prepare_pkt(txq, m, cmd, flags);

	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     m->ol_flags, segdw, flags);
		if (!ev->sched_type) {
			otx2_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
			otx2_ssogws_head_wait(ws);
			if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
				otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
						       txq->io_addr, segdw);
		} else {
			otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
					       txq->io_addr, segdw);
		}
	} else {
		/* Pass the number of segdw as 4: HDR + EXT + SG + SMEM */
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     m->ol_flags, 4, flags);

		if (!ev->sched_type) {
			otx2_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
			otx2_ssogws_head_wait(ws);
			if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
				otx2_nix_xmit_one(cmd, txq->lmt_addr,
						  txq->io_addr, flags);
		} else {
			otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
					  flags);
		}
	}

	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	otx2_ssogws_swtag_flush(ws);

	return 1;
}

#endif