/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

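/**
 * Allocate one MCAM counter from the AF via the mailbox.
 *
 * A minimal usage sketch (hypothetical caller, assuming an initialized
 * NPC context):
 *
 *	uint16_t ctr;
 *	int rc = npc_mcam_alloc_counter(npc, &ctr);
 *	if (rc == 0)
 *		rc = npc_mcam_free_counter(npc, ctr);
 *
 * @param npc NPC context structure
 * @param ctr Filled with the allocated HW counter index on success
 *
 * @return 0 on success, negative error code otherwise
 */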
static int
npc_mcam_alloc_counter(struct npc *npc, uint16_t *ctr)
{
	struct npc_mcam_alloc_counter_req *req;
	struct npc_mcam_alloc_counter_rsp *rsp;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
	if (req == NULL)
		return rc;
	req->count = 1;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;
	*ctr = rsp->cntr_list[0];
	return rc;
}

int
npc_mcam_free_counter(struct npc *npc, uint16_t ctr_id)
{
	struct npc_mcam_oper_counter_req *req;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_free_counter(mbox);
	if (req == NULL)
		return rc;
	req->cntr = ctr_id;
	return mbox_process(mbox);
}

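/**
 * Read the hit statistic of an MCAM counter via the mailbox.
 *
 * A minimal polling sketch (hypothetical caller):
 *
 *	uint64_t hits;
 *	if (flow->ctr_id != NPC_COUNTER_NONE)
 *		rc = npc_mcam_read_counter(npc, flow->ctr_id, &hits);
 *
 * @param npc NPC context structure
 * @param ctr_id HW counter index to read
 * @param count Filled with the 64-bit hit count on success
 *
 * @return 0 on success, negative error code otherwise
 */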
int
npc_mcam_read_counter(struct npc *npc, uint32_t ctr_id, uint64_t *count)
{
	struct npc_mcam_oper_counter_req *req;
	struct npc_mcam_oper_counter_rsp *rsp;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_counter_stats(mbox);
	if (req == NULL)
		return rc;
	req->cntr = ctr_id;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;
	*count = rsp->stat;
	return rc;
}

int
npc_mcam_clear_counter(struct npc *npc, uint32_t ctr_id)
{
	struct npc_mcam_oper_counter_req *req;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_clear_counter(mbox);
	if (req == NULL)
		return rc;
	req->cntr = ctr_id;
	return mbox_process(mbox);
}

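/**
 * Return a single MCAM entry to the AF free pool.
 *
 * @param npc NPC context structure
 * @param entry MCAM entry index to free
 *
 * @return 0 on success, negative error code otherwise
 */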
int
npc_mcam_free_entry(struct npc *npc, uint32_t entry)
{
	struct npc_mcam_free_entry_req *req;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_free_entry(mbox);
	if (req == NULL)
		return rc;
	req->entry = entry;
	return mbox_process(mbox);
}

int
npc_mcam_free_all_entries(struct npc *npc)
{
	struct npc_mcam_free_entry_req *req;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_free_entry(mbox);
	if (req == NULL)
		return rc;
	req->all = 1;
	return mbox_process(mbox);
}

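/**
 * Convert a supported-nibble mask into a search key length in bits.
 *
 * Each set bit in supp_mask enables one 4-bit nibble of the MCAM search
 * key, so the length is popcount(supp_mask) * 4. For example, a mask of
 * 0x3 enables two nibbles, giving a key length of 8 bits.
 */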
static int
npc_supp_key_len(uint32_t supp_mask)
{
	int nib_count = 0;

	while (supp_mask) {
		nib_count++;
		supp_mask &= (supp_mask - 1);
	}
	return nib_count * 4;
}

/**
 * Returns true if any LDATA bits are extracted for the given LID+LTYPE.
 *
 * LFLAG extraction is not taken into account.
 */
static int
npc_lid_lt_in_kex(struct npc *npc, uint8_t lid, uint8_t lt)
{
	struct npc_xtract_info *x_info;
	int i;

	for (i = 0; i < NPC_MAX_LD; i++) {
		x_info = &npc->prx_dxcfg[NIX_INTF_RX][lid][lt].xtract[i];
		/* Check for LDATA */
		if (x_info->enable && x_info->len > 0)
			return true;
	}

	return false;
}

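/**
 * Set a bit in bmap for every protocol-header bit that the KEX profile
 * extracts for the given LID+LTYPE through LDATA extractor ld. When
 * flag-based extraction is enabled for the layer, the coverage of the
 * matching LFLAG extractors is added as well.
 */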
static void
npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid,
			 uint8_t lt, uint8_t ld)
{
	struct npc_xtract_info *x_info, *infoflag;
	int hdr_off, keylen;
	npc_dxcfg_t *p;
	npc_fxcfg_t *q;
	int i, j;

	p = &npc->prx_dxcfg;
	x_info = &(*p)[0][lid][lt].xtract[ld];

	if (x_info->enable == 0)
		return;

	hdr_off = x_info->hdr_off * 8;
	keylen = x_info->len * 8;
	for (i = hdr_off; i < (hdr_off + keylen); i++)
		plt_bitmap_set(bmap, i);

	if (x_info->flags_enable == 0)
		return;

	if ((npc->prx_lfcfg[0].i & 0x7) != lid)
		return;

	q = &npc->prx_fxcfg;
	for (j = 0; j < NPC_MAX_LFL; j++) {
		infoflag = &(*q)[0][ld][j].xtract[0];
		if (infoflag->enable) {
			hdr_off = infoflag->hdr_off * 8;
			keylen = infoflag->len * 8;
			for (i = hdr_off; i < (hdr_off + keylen); i++)
				plt_bitmap_set(bmap, i);
		}
	}
}

/**
 * Check if the given LID+LTYPE combination is present in the KEX.
 *
 * If len is non-zero, this function returns true only if the KEX
 * extracts len bytes at the given offset. Otherwise it returns true if
 * any bytes are extracted specifically for the given LID+LTYPE
 * combination (that is, not LFLAG based). The second case increases
 * flexibility for custom frames whose extracted bits may change
 * depending on the KEX profile loaded.
 *
 * @param npc NPC context structure
 * @param lid Layer ID to check for
 * @param lt Layer Type to check for
 * @param offset offset into the layer header to match
 * @param len length of the match
 */
static bool
npc_is_kex_enabled(struct npc *npc, uint8_t lid, uint8_t lt, int offset,
		   int len)
{
	struct plt_bitmap *bmap;
	uint32_t bmap_sz;
	uint8_t *mem;
	int i;

	if (!len)
		return npc_lid_lt_in_kex(npc, lid, lt);

	bmap_sz = plt_bitmap_get_memory_footprint(300 * 8);
	mem = plt_zmalloc(bmap_sz, 0);
	if (mem == NULL) {
		plt_err("mem alloc failed");
		return false;
	}
	bmap = plt_bitmap_init(300 * 8, mem, bmap_sz);
	if (bmap == NULL) {
		plt_err("bitmap init failed");
		plt_free(mem);
		return false;
	}

	npc_construct_ldata_mask(npc, bmap, lid, lt, 0);
	npc_construct_ldata_mask(npc, bmap, lid, lt, 1);

	for (i = offset; i < (offset + len); i++) {
		if (plt_bitmap_get(bmap, i) != 0x1) {
			plt_free(mem);
			return false;
		}
	}

	plt_free(mem);
	return true;
}

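/**
 * Derive the flow-match capabilities of the currently loaded KEX
 * profile.
 *
 * Each capability bit (DMAC, VLAN IDs, IP addresses, L4 ports, tunnel
 * IDs, etc.) is set only if the profile extracts the corresponding
 * header bytes at their expected offset, letting callers reject match
 * patterns the hardware key cannot express.
 *
 * @return Capability bits as npc_kex_cap_terms_t::all_bits
 */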
uint64_t
npc_get_kex_capability(struct npc *npc)
{
	npc_kex_cap_terms_t kex_cap;

	memset(&kex_cap, 0, sizeof(kex_cap));

	/* Ethtype: offset 12B, len 2B */
	kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
		npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
	/* QINQ VLAN Ethtype: offset 8B, len 2B */
	kex_cap.bit.ethtype_x = npc_is_kex_enabled(
		npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
	/* VLAN ID0 : Outer VLAN: offset 2B, len 2B */
	kex_cap.bit.vlan_id_0 = npc_is_kex_enabled(
		npc, NPC_LID_LB, NPC_LT_LB_CTAG, 2 * 8, 2 * 8);
	/* VLAN ID0 : Inner VLAN: offset 6B, len 2B */
	kex_cap.bit.vlan_id_x = npc_is_kex_enabled(
		npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6 * 8, 2 * 8);
	/* DMAC: offset 0B, len 6B */
	kex_cap.bit.dmac = npc_is_kex_enabled(npc, NPC_LID_LA, NPC_LT_LA_ETHER,
					      0 * 8, 6 * 8);
	/* IP proto: offset 9B, len 1B */
	kex_cap.bit.ip_proto =
		npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 9 * 8, 1 * 8);
	/* UDP dport: offset 2B, len 2B */
	kex_cap.bit.udp_dport = npc_is_kex_enabled(npc, NPC_LID_LD,
						   NPC_LT_LD_UDP, 2 * 8, 2 * 8);
	/* UDP sport: offset 0B, len 2B */
	kex_cap.bit.udp_sport = npc_is_kex_enabled(npc, NPC_LID_LD,
						   NPC_LT_LD_UDP, 0 * 8, 2 * 8);
	/* TCP dport: offset 2B, len 2B */
	kex_cap.bit.tcp_dport = npc_is_kex_enabled(npc, NPC_LID_LD,
						   NPC_LT_LD_TCP, 2 * 8, 2 * 8);
	/* TCP sport: offset 0B, len 2B */
	kex_cap.bit.tcp_sport = npc_is_kex_enabled(npc, NPC_LID_LD,
						   NPC_LT_LD_TCP, 0 * 8, 2 * 8);
	/* IP SIP: offset 12B, len 4B */
	kex_cap.bit.sip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP,
						  12 * 8, 4 * 8);
	/* IP DIP: offset 14B, len 4B */
	kex_cap.bit.dip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP,
						  14 * 8, 4 * 8);
	/* IP6 SIP: offset 8B, len 16B */
	kex_cap.bit.sip6_addr = npc_is_kex_enabled(
		npc, NPC_LID_LC, NPC_LT_LC_IP6, 8 * 8, 16 * 8);
	/* IP6 DIP: offset 24B, len 16B */
	kex_cap.bit.dip6_addr = npc_is_kex_enabled(
		npc, NPC_LID_LC, NPC_LT_LC_IP6, 24 * 8, 16 * 8);
	/* ESP SPI: offset 0B, len 4B */
	kex_cap.bit.ipsec_spi = npc_is_kex_enabled(npc, NPC_LID_LE,
						   NPC_LT_LE_ESP, 0 * 8, 4 * 8);
	/* VXLAN VNI: offset 4B, len 3B */
	kex_cap.bit.ld_vni = npc_is_kex_enabled(npc, NPC_LID_LE,
						NPC_LT_LE_VXLAN, 0 * 8, 3 * 8);

	/* Custom L3 frame: varied offsets and lengths */
	kex_cap.bit.custom_l3 =
		npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM0, 0, 0);
	kex_cap.bit.custom_l3 |= (uint64_t)npc_is_kex_enabled(
		npc, NPC_LID_LC, NPC_LT_LC_CUSTOM1, 0, 0);
	/* SCTP sport : offset 0B, len 2B */
	kex_cap.bit.sctp_sport = npc_is_kex_enabled(
		npc, NPC_LID_LD, NPC_LT_LD_SCTP, 0 * 8, 2 * 8);
	/* SCTP dport : offset 2B, len 2B */
	kex_cap.bit.sctp_dport = npc_is_kex_enabled(
		npc, NPC_LID_LD, NPC_LT_LD_SCTP, 2 * 8, 2 * 8);
	/* ICMP type : offset 0B, len 1B */
	kex_cap.bit.icmp_type = npc_is_kex_enabled(
		npc, NPC_LID_LD, NPC_LT_LD_ICMP, 0 * 8, 1 * 8);
	/* ICMP code : offset 1B, len 1B */
	kex_cap.bit.icmp_code = npc_is_kex_enabled(
		npc, NPC_LID_LD, NPC_LT_LD_ICMP, 1 * 8, 1 * 8);
	/* ICMP id : offset 4B, len 2B */
	kex_cap.bit.icmp_id = npc_is_kex_enabled(npc, NPC_LID_LD,
						 NPC_LT_LD_ICMP, 4 * 8, 2 * 8);
	/* IGMP grp_addr : offset 4B, len 4B */
	kex_cap.bit.igmp_grp_addr = npc_is_kex_enabled(
		npc, NPC_LID_LD, NPC_LT_LD_IGMP, 4 * 8, 4 * 8);
	/* GTPU teid : offset 4B, len 4B */
	kex_cap.bit.gtpu_teid = npc_is_kex_enabled(
		npc, NPC_LID_LE, NPC_LT_LE_GTPU, 4 * 8, 4 * 8);
	return kex_cap.all_bits;
}

#define BYTESM1_SHIFT 16
#define HDR_OFF_SHIFT 8
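/**
 * Unpack one KEX extraction config word. Its layout, as decoded below,
 * is:
 *	[19:16] BYTESM1   - number of bytes to extract, minus one
 *	[15:8]  HDR_OFF   - byte offset within the protocol header
 *	[7]     ENA       - extraction enable
 *	[6]     FLAGS_ENA - flag-based extraction enable
 *	[5:0]   KEY_OFF   - byte offset within the search key
 */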
static void
npc_update_kex_info(struct npc_xtract_info *xtract_info, uint64_t val)
{
	xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
	xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
	xtract_info->key_off = val & 0x3f;
	xtract_info->enable = ((val >> 7) & 0x1);
	xtract_info->flags_enable = ((val >> 6) & 0x1);
}

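/**
 * Allocate req_count non-contiguous MCAM entries placed at priority prio
 * relative to the ref_mcam entry. The AF may grant fewer entries than
 * requested; the granted entry ids are returned in alloc_entry and their
 * number in resp_count.
 */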
int
npc_mcam_alloc_entries(struct npc *npc, int ref_mcam, int *alloc_entry,
		       int req_count, int prio, int *resp_count)
{
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;
	int i;

	req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
	if (req == NULL)
		return rc;
	req->contig = 0;
	req->count = req_count;
	req->priority = prio;
	req->ref_entry = ref_mcam;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;
	for (i = 0; i < rsp->count; i++)
		alloc_entry[i] = rsp->entry_list[i];
	*resp_count = rsp->count;
	return 0;
}

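/**
 * Allocate a single contiguous MCAM entry at priority prio relative to
 * ref_mcam, and initialize mcam with the new entry id and the reference
 * entry's NIX interface.
 */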
int
npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow *mcam,
		     struct roc_npc_flow *ref_mcam, int prio, int *resp_count)
{
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
	if (req == NULL)
		return rc;
	req->contig = 1;
	req->count = 1;
	req->priority = prio;
	req->ref_entry = ref_mcam->mcam_id;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;
	memset(mcam, 0, sizeof(struct roc_npc_flow));
	mcam->mcam_id = rsp->entry;
	mcam->nix_intf = ref_mcam->nix_intf;
	*resp_count = rsp->count;
	return 0;
}

int
npc_mcam_ena_dis_entry(struct npc *npc, struct roc_npc_flow *mcam, bool enable)
{
	struct npc_mcam_ena_dis_entry_req *req;
	struct mbox *mbox = npc->mbox;
	int rc = -ENOSPC;

	if (enable)
		req = mbox_alloc_msg_npc_mcam_ena_entry(mbox);
	else
		req = mbox_alloc_msg_npc_mcam_dis_entry(mbox);

	if (req == NULL)
		return rc;
	req->entry = mcam->mcam_id;
	mcam->enable = enable;
	return mbox_process(mbox);
}

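/**
 * Program an MCAM entry from the state cached in the roc_npc_flow:
 * key data/mask, action, vtag action and enable state, all in a single
 * mailbox write.
 */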
int
npc_mcam_write_entry(struct npc *npc, struct roc_npc_flow *mcam)
{
	struct npc_mcam_write_entry_req *req;
	struct mbox *mbox = npc->mbox;
	struct mbox_msghdr *rsp;
	int rc = -ENOSPC;
	int i;

	req = mbox_alloc_msg_npc_mcam_write_entry(mbox);
	if (req == NULL)
		return rc;
	req->entry = mcam->mcam_id;
	req->intf = mcam->nix_intf;
	req->enable_entry = mcam->enable;
	req->entry_data.action = mcam->npc_action;
	req->entry_data.vtag_action = mcam->vtag_action;
	for (i = 0; i < NPC_MCAM_KEY_X4_WORDS; i++) {
		req->entry_data.kw[i] = mcam->mcam_data[i];
		req->entry_data.kw_mask[i] = mcam->mcam_mask[i];
	}
	return mbox_process_msg(mbox, (void *)&rsp);
}

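/**
 * Cache the MKEX profile reported by the AF into the NPC context:
 * per-interface key nibble masks and key lengths, per LID+LTYPE LDATA
 * extraction config, and per-LFLAG extraction config.
 */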
static void
npc_mcam_process_mkex_cfg(struct npc *npc, struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	npc_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info = &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				npc_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	q = (volatile uint64_t(*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])(
		&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					npc_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}

int
npc_mcam_fetch_kex_cfg(struct npc *npc)
{
	struct npc_get_kex_cfg_rsp *kex_rsp;
	struct mbox *mbox = npc->mbox;
	int rc = 0;

	mbox_alloc_msg_npc_get_kex_cfg(mbox);
	rc = mbox_process_msg(mbox, (void *)&kex_rsp);
	if (rc) {
		plt_err("Failed to fetch NPC KEX config");
		goto done;
	}

	mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name,
		    MKEX_NAME_LEN);

	npc_mcam_process_mkex_cfg(npc, kex_rsp);

done:
	return rc;
}

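/**
 * Rewrite the channel field (bits 11:0 of KW0) in both the mailbox
 * request and the cached flow key/mask. Second-pass rules match the CPT
 * channel explicitly; otherwise the CPT channel bits are cleared from
 * data and mask so one rule matches both first-pass packets and
 * second-pass packets from CPT.
 */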
static void
npc_mcam_set_channel(struct roc_npc_flow *flow,
		     struct npc_mcam_write_entry_req *req, uint16_t channel,
		     uint16_t chan_mask, bool is_second_pass)
{
	uint16_t chan = 0, mask = 0;

	req->entry_data.kw[0] &= ~(GENMASK(11, 0));
	req->entry_data.kw_mask[0] &= ~(GENMASK(11, 0));
	flow->mcam_data[0] &= ~(GENMASK(11, 0));
	flow->mcam_mask[0] &= ~(GENMASK(11, 0));

	if (is_second_pass) {
		chan = (channel | NIX_CHAN_CPT_CH_START);
		mask = (chan_mask | NIX_CHAN_CPT_CH_START);
	} else {
		/*
		 * Clear bits 10 & 11 corresponding to the CPT
		 * channel. By default, rules should match
		 * both first-pass packets and second-pass
		 * packets from CPT.
		 */
		chan = (channel & NIX_CHAN_CPT_X2P_MASK);
		mask = (chan_mask & NIX_CHAN_CPT_X2P_MASK);
	}

	req->entry_data.kw[0] |= (uint64_t)chan;
	req->entry_data.kw_mask[0] |= (uint64_t)mask;
	flow->mcam_data[0] |= (uint64_t)chan;
	flow->mcam_mask[0] |= (uint64_t)mask;
}

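/**
 * Reserve a free MCAM entry (and an MCAM counter when the flow requests
 * one), then program it from the parsed flow: action, vtag action and
 * key data/mask, plus the channel match for RX rules or the originating
 * pf_func match for TX rules.
 */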
int
npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow,
			 struct npc_parse_state *pst)
{
	int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
	struct npc_mcam_write_entry_req *req;
	struct nix_inl_dev *inl_dev = NULL;
	struct mbox *mbox = npc->mbox;
	struct mbox_msghdr *rsp;
	struct idev_cfg *idev;
	uint16_t pf_func = 0;
	uint16_t ctr = ~(0);
	int rc, idx;
	int entry;

	PLT_SET_USED(pst);

	if (use_ctr) {
		rc = npc_mcam_alloc_counter(npc, &ctr);
		if (rc)
			return rc;
	}

	entry = npc_get_free_mcam_entry(mbox, flow, npc);
	if (entry < 0) {
		if (use_ctr)
			npc_mcam_free_counter(npc, ctr);
		return NPC_ERR_MCAM_ALLOC;
	}

	req = mbox_alloc_msg_npc_mcam_write_entry(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->set_cntr = use_ctr;
	req->cntr = ctr;
	req->entry = entry;

	req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
	req->enable_entry = 1;
	req->entry_data.action = flow->npc_action;

	/*
	 * The driver sets the vtag action on a per-interface basis, not
	 * per flow. It is a matter of how we decide to support this
	 * PMD-specific behavior. There are two ways:
	 * 1. Inherit the vtag action from the one configured for this
	 *    interface. This can be read from the vtag_action configured
	 *    for the default MCAM entry of this pf_func.
	 * 2. Do not support vtag action with npc_flow.
	 *
	 * The second approach is used now.
	 */
	req->entry_data.vtag_action = flow->vtag_action;

	for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
		req->entry_data.kw[idx] = flow->mcam_data[idx];
		req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
	}

	idev = idev_get_cfg();
	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (flow->nix_intf == NIX_INTF_RX) {
		if (inl_dev && inl_dev->is_multi_channel &&
		    (flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
			pf_func = nix_inl_dev_pffunc_get();
			req->entry_data.action &= ~(GENMASK(19, 4));
			req->entry_data.action |= (uint64_t)pf_func << 4;
			flow->npc_action &= ~(GENMASK(19, 4));
			flow->npc_action |= (uint64_t)pf_func << 4;

			npc_mcam_set_channel(flow, req, inl_dev->channel,
					     inl_dev->chan_mask, false);
		} else if (npc->is_sdp_link) {
			npc_mcam_set_channel(flow, req, npc->sdp_channel,
					     npc->sdp_channel_mask,
					     pst->is_second_pass_rule);
		} else {
			npc_mcam_set_channel(flow, req, npc->channel,
					     (BIT_ULL(12) - 1),
					     pst->is_second_pass_rule);
		}
	} else {
		uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;

		pf_func = plt_cpu_to_be_16(pf_func);
		req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
		req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);

		flow->mcam_data[0] |= ((uint64_t)pf_func << 32);
		flow->mcam_mask[0] |= ((uint64_t)0xffff << 32);
	}

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc != 0)
		return rc;

	flow->mcam_id = entry;

	if (use_ctr)
		flow->ctr_id = ctr;
	return 0;
}

static void
npc_set_vlan_ltype(struct npc_parse_state *pst)
{
	uint64_t val, mask;
	uint8_t lb_offset;

	lb_offset =
		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
				   ((1ULL << NPC_LTYPE_LB_OFFSET) - 1));
	lb_offset *= 4;

	mask = ~((0xfULL << lb_offset));
	pst->flow->mcam_data[0] &= mask;
	pst->flow->mcam_mask[0] &= mask;
	/* NPC_LT_LB_CTAG: 0b0010, NPC_LT_LB_STAG_QINQ: 0b0011
	 * Set LB layertype/mask as 0b0010/0b1110 to match both.
	 */
	val = ((uint64_t)(NPC_LT_LB_CTAG & NPC_LT_LB_STAG_QINQ)) << lb_offset;
	pst->flow->mcam_data[0] |= val;
	pst->flow->mcam_mask[0] |= (0xeULL << lb_offset);
}

static void
npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst)
{
	uint8_t lc_offset, lcflag_offset;
	uint64_t val, mask;

	lc_offset =
		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
				   ((1ULL << NPC_LTYPE_LC_OFFSET) - 1));
	lc_offset *= 4;

	mask = ~((0xfULL << lc_offset));
	pst->flow->mcam_data[0] &= mask;
	pst->flow->mcam_mask[0] &= mask;
	/* NPC_LT_LC_IP6: 0b0100, NPC_LT_LC_IP6_EXT: 0b0101
	 * Set LC layertype/mask as 0b0100/0b1110 to match both.
	 */
	val = ((uint64_t)(NPC_LT_LC_IP6 & NPC_LT_LC_IP6_EXT)) << lc_offset;
	pst->flow->mcam_data[0] |= val;
	pst->flow->mcam_mask[0] |= (0xeULL << lc_offset);

	/* If the LC LFLAG is non-zero, set the LC LFLAG mask to 0xF. In
	 * the general case, the flag mask is set to the same value as the
	 * data; for example, to match 3 VLANs, the flags have to match a
	 * range of values. But for IPv6 extension-header matching we need
	 * an exact match, hence the mask is set to 0xF. This is done only
	 * if the LC LFLAG value is non-zero, because for AH and ESP the
	 * LC LFLAG is zero and we don't want to match zero in the LFLAG.
	 */
	lcflag_offset =
		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
				   ((1ULL << NPC_LFLAG_LC_OFFSET) - 1));
	lcflag_offset *= 4;

	mask = (0xfULL << lcflag_offset);
	val = pst->flow->mcam_data[0] & mask;
	if (val)
		pst->flow->mcam_mask[0] |= mask;
}

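/**
 * Build the non-LDATA part of the MCAM search key from the parse state
 * and optionally allocate and program the entry.
 *
 * Each layer may contribute up to three nibbles (low LFLAGS, high
 * LFLAGS, LTYPE, in that key order), and only nibbles enabled in
 * keyx_supp_nmask consume key space. As an illustrative example (the
 * actual layout depends on the loaded profile): with only the LTYPE
 * nibbles of LA and LB enabled, an Ethernet+CTAG packet packs
 * NPC_LT_LA_ETHER and NPC_LT_LB_CTAG into two consecutive key nibbles.
 */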
int
npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
{
	struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
	/* This is the non-LDATA part of the search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int key_len, bit = 0, index, rc = 0;
	int intf = pst->flow->nix_intf;
	struct mcam_entry *base_entry;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	uint16_t layer_info;
	uint64_t lt, flags;

	/* Skip till the start of Layer A data */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (npc->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S */
		layer_info = ((npc->keyx_supp_nmask[intf] >> off) & 0x7);

		if (layer_info) {
			for (idx = 0; idx <= 2; idx++) {
				if (layer_info & (1 << idx)) {
					if (idx == 2) {
						data = lt;
						mask = 0xf;
					} else if (idx == 1) {
						data = ((flags >> 4) & 0xf);
						mask = ((flags >> 4) & 0xf);
					} else {
						data = (flags & 0xf);
						mask = (flags & 0xf);
					}

					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |=
						((uint64_t)data << data_off);

					if (lt == 0)
						mask = 0;
					key_mask[index] |=
						((uint64_t)mask << data_off);
					data_off += 4;
				}
			}
		}
	}

	/* Copy this into the mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	if (pst->set_vlan_ltype_mask)
		npc_set_vlan_ltype(pst);

	if (pst->set_ipv6ext_ltype_mask)
		npc_set_ipv6ext_ltype_mask(pst);

	if (pst->is_vf && pst->flow->nix_intf == NIX_INTF_RX) {
		(void)mbox_alloc_msg_npc_read_base_steer_rule(npc->mbox);
		rc = mbox_process_msg(npc->mbox, (void *)&base_rule_rsp);
		if (rc) {
			plt_err("Failed to fetch VF's base MCAM entry");
			return rc;
		}
		base_entry = &base_rule_rsp->entry_data;
		for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
			pst->flow->mcam_data[idx] |= base_entry->kw[idx];
			pst->flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
		}
	}

	/*
	 * Now we have the mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * The hole is present if key_len is an odd number of nibbles.
	 * The mcam data must be split into 64-bit + 48-bit segments
	 * for each bank, W0 and W1.
	 */

	if (mcam_alloc)
		return npc_mcam_alloc_and_write(npc, pst->flow, pst);
	else
		return 0;
}

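/**
 * Enable or disable every installed flow by rewriting each MCAM entry
 * with its cached contents and the new enable state.
 */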
int
npc_flow_enable_all_entries(struct npc *npc, bool enable)
{
	struct npc_flow_list *list;
	struct roc_npc_flow *flow;
	int rc = 0, idx;

	/* Rewrite each flow on every priority list with the new state */
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		list = &npc->flow_list[idx];
		TAILQ_FOREACH(flow, list, next) {
			flow->enable = enable;
			rc = npc_mcam_write_entry(npc, flow);
			if (rc)
				return rc;
		}
	}
	return rc;
}

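/**
 * Release all flow resources: free every MCAM entry in one mailbox call,
 * then, per flow, free its RSS group and counter, drop it from the
 * priority list and free the flow object.
 */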
int
npc_flow_free_all_resources(struct npc *npc)
{
	struct roc_npc_flow *flow;
	int rc, idx;

	/* Free all MCAM entries allocated */
	rc = npc_mcam_free_all_entries(npc);

	/* Free any MCAM counters and delete the flow lists */
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
			npc_rss_group_free(npc, flow);
			if (flow->ctr_id != NPC_COUNTER_NONE)
				rc |= npc_mcam_free_counter(npc, flow->ctr_id);

			npc_delete_prio_list_entry(npc, flow);

			TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
			plt_free(flow);
		}
	}
	return rc;
}