/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_flow.h"

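/* Direction selector for the NIX vtag mailbox config: VTAG_RX programs
 * VLAN strip/capture on ingress, VTAG_TX programs VLAN insert on egress
 * (see flow_program_vtag_action() below).
 */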
enum flow_vtag_cfg_dir { VTAG_TX, VTAG_RX };

int
otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
{
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct otx2_mcam_ents_info *info;
	struct rte_bitmap *bmap;
	struct rte_flow *flow;
	int entry_count = 0;
	int rc, idx;

	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		info = &npc->flow_entry_info[idx];
		entry_count += info->live_ent;
	}

	if (entry_count == 0)
		return 0;

	/* Free all MCAM entries allocated */
	rc = otx2_flow_mcam_free_all_entries(mbox);

	/* Free any MCAM counters and delete flow list */
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
			if (flow->ctr_id != NPC_COUNTER_NONE)
				rc |= otx2_flow_mcam_free_counter(mbox,
								  flow->ctr_id);

			TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
			bmap = npc->live_entries[flow->priority];
			rte_bitmap_clear(bmap, flow->mcam_id);
			rte_free(flow);
		}
		info = &npc->flow_entry_info[idx];
		info->free_ent = 0;
		info->live_ent = 0;
	}
	return rc;
}

static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
		 struct otx2_npc_flow_info *flow_info)
{
	/* This is non-LDATA part in search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int intf = pst->flow->nix_intf;
	int key_len, bit = 0, index;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	uint16_t layer_info;
	uint64_t lt, flags;

	/* Skip till Layer A data start */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;
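	/* Example: if three nibble-mask bits are set before the LA offset,
	 * LA key data begins at nibble 3, i.e. bit offset 12 in key_data[0].
	 */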

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S */
		layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

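		/* Each of the three layer_info bits enables one nibble of
		 * key data: bit 0 - lower flags nibble, bit 1 - upper flags
		 * nibble, bit 2 - layer type (LTYPE).
		 */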
		if (layer_info) {
			for (idx = 0; idx <= 2; idx++) {
				if (layer_info & (1 << idx)) {
					if (idx == 2)
						data = lt;
					else if (idx == 1)
						data = ((flags >> 4) & 0xf);
					else
						data = (flags & 0xf);

					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |= ((uint64_t)data <<
							    data_off);
					mask = 0xf;
					if (lt == 0)
						mask = 0;
					key_mask[index] |= ((uint64_t)mask <<
							    data_off);
					data_off += 4;
				}
			}
		}
	}

	otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
		     key_data[0], key_data[1]);

	/* Copy this into mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	otx2_npc_dbg("Key_len = %d", key_len);
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	otx2_npc_dbg("Final flow data");
	for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
		otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
			     idx, pst->flow->mcam_data[idx],
			     idx, pst->flow->mcam_mask[idx]);
	}

	/*
	 * Now we have mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * The hole is present if key_len is an odd number of nibbles.
	 * The mcam data must be split into 64-bit + 48-bit segments
	 * for each bank W0, W1.
	 */

	return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}

static int
flow_parse_attr(struct rte_eth_dev *eth_dev,
		const struct rte_flow_attr *attr,
		struct rte_flow_error *error,
		struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	const char *errmsg = NULL;

	if (attr == NULL)
		errmsg = "Attribute can't be empty";
	else if (attr->group)
		errmsg = "Groups are not supported";
	else if (attr->priority >= dev->npc_flow.flow_max_priority)
		errmsg = "Priority should be within specified range";
	else if ((!attr->egress && !attr->ingress) ||
		 (attr->egress && attr->ingress))
		errmsg = "Exactly one of ingress or egress must be set";

	if (errmsg != NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
				   attr, errmsg);
		return -ENOTSUP;
	}

	if (attr->ingress)
		flow->nix_intf = OTX2_INTF_RX;
	else
		flow->nix_intf = OTX2_INTF_TX;

	flow->priority = attr->priority;
	return 0;
}

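/* Scan the RSS group bitmap for the first unused group. Returns 0 with
 * *pos set to the first clear bit, or -1 when all groups are in use.
 */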
static inline int
flow_get_free_rss_grp(struct rte_bitmap *bmap,
		      uint32_t size, uint32_t *pos)
{
	for (*pos = 0; *pos < size; ++*pos) {
		if (!rte_bitmap_get(bmap, *pos))
			break;
	}

	return *pos < size ? 0 : -1;
}

static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
			  const struct rte_flow_action_rss *rss,
			  uint8_t *alg_idx, uint32_t *rss_grp,
			  int mcam_index)
{
	struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
	uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
	uint32_t flowkey_cfg, grp_aval, i;
	uint16_t *ind_tbl = NULL;
	uint8_t flowkey_algx;
	int rc;

	rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
				   flow_info->rss_grps, &grp_aval);
	/* RSS group 0 is not usable for the flow RSS action */
	if (rc < 0 || grp_aval == 0)
		return -ENOSPC;

	*rss_grp = grp_aval;

	otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
			     rss->key_len);

	/* If the queue count passed in the RSS action is less than the
	 * HW-configured RETA size, replicate the action's queue list
	 * across the HW RETA table.
	 */
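	/* For example, with rss_size 64 and queue_num 6, ten full copies of
	 * the queue list fill reta[0..59] and the tail copy places
	 * queue[0..3] into reta[60..63].
	 */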
	if (dev->rss_info.rss_size > rss->queue_num) {
		ind_tbl = reta;

		for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
			memcpy(reta + i * rss->queue_num, rss->queue,
			       sizeof(uint16_t) * rss->queue_num);

		i = dev->rss_info.rss_size % rss->queue_num;
		if (i)
			memcpy(&reta[dev->rss_info.rss_size - i],
			       rss->queue, i * sizeof(uint16_t));
	} else {
		ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
	}

	rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
	if (rc) {
		otx2_err("Failed to init rss table rc = %d", rc);
		return rc;
	}

	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

	rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
			     *rss_grp, mcam_index);
	if (rc) {
		otx2_err("Failed to set rss hash function rc = %d", rc);
		return rc;
	}

	*alg_idx = flowkey_algx;

	rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

	return 0;
}

static int
flow_program_rss_action(struct rte_eth_dev *eth_dev,
			const struct rte_flow_action actions[],
			struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	const struct rte_flow_action_rss *rss;
	uint32_t rss_grp;
	uint8_t alg_idx;
	int rc;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			rss = (const struct rte_flow_action_rss *)actions->conf;

			rc = flow_configure_rss_action(dev,
						       rss, &alg_idx, &rss_grp,
						       flow->mcam_id);
			if (rc)
				return rc;

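			/* The low 4 bits of npc_action hold the action op:
			 * replace it with RSS and encode the flow key
			 * algorithm index and RSS group above it.
			 */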
			flow->npc_action &= (~(0xfULL));
			flow->npc_action |= NIX_RX_ACTIONOP_RSS;
			flow->npc_action |=
				((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
				 NIX_RSS_ACT_ALG_OFFSET) |
				((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
				 NIX_RSS_ACT_GRP_OFFSET);
		}
	}
	return 0;
}

static int
flow_free_rss_action(struct rte_eth_dev *eth_dev,
		     struct rte_flow *flow)
{
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	uint32_t rss_grp;

	if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
		rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
			  NIX_RSS_ACT_GRP_MASK;
		if (rss_grp == 0 || rss_grp >= npc->rss_grps)
			return -EINVAL;

		rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
	}

	return 0;
}

static int
flow_update_sec_tt(struct rte_eth_dev *eth_dev,
		   const struct rte_flow_action actions[])
{
	int rc = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
			rc = otx2_eth_sec_update_tag_type(eth_dev);
			break;
		}
	}

	return rc;
}

static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
	otx2_npc_dbg("Meta Item");
	return 0;
}

/*
 * Parse function of each layer:
 * - Consume one or more patterns that are relevant.
 * - Update parse_state
 * - Set parse_state.pattern = last item consumed
 * - Set appropriate error code/message when returning error.
 */
typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);

static int
flow_parse_pattern(struct rte_eth_dev *dev,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error,
		   struct rte_flow *flow,
		   struct otx2_parse_state *pst)
{
	flow_parse_stage_func_t parse_stage_funcs[] = {
		flow_parse_meta_items,
		otx2_flow_parse_higig2_hdr,
		otx2_flow_parse_la,
		otx2_flow_parse_lb,
		otx2_flow_parse_lc,
		otx2_flow_parse_ld,
		otx2_flow_parse_le,
		otx2_flow_parse_lf,
		otx2_flow_parse_lg,
		otx2_flow_parse_lh,
	};
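	/* Stages above run in NPC layer order: meta items, the HiGig2
	 * header, then protocol layers LA through LH.
	 */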
	struct otx2_eth_dev *hw = dev->data->dev_private;
	uint8_t layer = 0;
	int key_offset;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "pattern is NULL");
		return -EINVAL;
	}

	memset(pst, 0, sizeof(*pst));
	pst->npc = &hw->npc_flow;
	pst->error = error;
	pst->flow = flow;

	/* Use integral byte offset */
	key_offset = pst->npc->keyx_len[flow->nix_intf];
	key_offset = (key_offset + 7) / 8;

	/* Location where LDATA would begin */
	pst->mcam_data = (uint8_t *)flow->mcam_data;
	pst->mcam_mask = (uint8_t *)flow->mcam_mask;

	while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
	       layer < RTE_DIM(parse_stage_funcs)) {
		otx2_npc_dbg("Pattern type = %d", pattern->type);

		/* Skip place-holders */
		pattern = otx2_flow_skip_void_and_any_items(pattern);

		pst->pattern = pattern;
		otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
		rc = parse_stage_funcs[layer](pst);
		if (rc != 0)
			return -rte_errno;

		layer++;

		/*
		 * Parse stage function sets pst->pattern to
		 * 1 past the last item it consumed.
		 */
		pattern = pst->pattern;

		if (pst->terminate)
			break;
	}

	/* Skip trailing place-holders */
	pattern = otx2_flow_skip_void_and_any_items(pattern);

	/* Are there more items than what we can handle? */
	if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "unsupported item in the sequence");
		return -ENOTSUP;
	}

	return 0;
}

static int
flow_parse_rule(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct rte_flow *flow,
		struct otx2_parse_state *pst)
{
	int err;

	/* Check attributes */
	err = flow_parse_attr(dev, attr, error, flow);
	if (err)
		return err;

	/* Check actions */
	err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
	if (err)
		return err;

	/* Check pattern */
	err = flow_parse_pattern(dev, pattern, error, flow, pst);
	if (err)
		return err;

	/* Check for overlaps? */
	return 0;
}

static int
otx2_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct otx2_parse_state parse_state;
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));
	return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
			       &parse_state);
}

static int
flow_program_vtag_action(struct rte_eth_dev *eth_dev,
			 const struct rte_flow_action actions[],
			 struct rte_flow *flow)
{
	uint16_t vlan_id = 0, vlan_ethtype = RTE_ETHER_TYPE_VLAN;
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	union {
		uint64_t reg;
		struct nix_tx_vtag_action_s act;
	} tx_vtag_action;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_vtag_config *vtag_cfg;
	struct nix_vtag_config_rsp *rsp;
	bool vlan_insert_action = false;
	uint64_t rx_vtag_action = 0;
	uint8_t vlan_pcp = 0;
	int rc;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) {
			if (dev->npc_flow.vtag_actions == 1) {
				vtag_cfg =
					otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
				vtag_cfg->cfg_type = VTAG_RX;
				vtag_cfg->rx.strip_vtag = 1;
				/* Always capture */
				vtag_cfg->rx.capture_vtag = 1;
				vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
				vtag_cfg->rx.vtag_type = 0;

				rc = otx2_mbox_process(mbox);
				if (rc)
					return rc;
			}

			rx_vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
			rx_vtag_action |= (NPC_LID_LB << 8);
			rx_vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
			flow->vtag_action = rx_vtag_action;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
			const struct rte_flow_action_of_set_vlan_vid *vtag =
				(const struct rte_flow_action_of_set_vlan_vid *)
				actions->conf;
			vlan_id = rte_be_to_cpu_16(vtag->vlan_vid);
			if (vlan_id > 0xfff) {
				otx2_err("Invalid vlan_id for set vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
			const struct rte_flow_action_of_push_vlan *ethtype =
				(const struct rte_flow_action_of_push_vlan *)
				actions->conf;
			vlan_ethtype = rte_be_to_cpu_16(ethtype->ethertype);
			if (vlan_ethtype != RTE_ETHER_TYPE_VLAN &&
			    vlan_ethtype != RTE_ETHER_TYPE_QINQ) {
				otx2_err("Invalid ethtype specified for push"
					 " vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
			const struct rte_flow_action_of_set_vlan_pcp *pcp =
				(const struct rte_flow_action_of_set_vlan_pcp *)
				actions->conf;
			vlan_pcp = pcp->vlan_pcp;
			if (vlan_pcp > 0x7) {
				otx2_err("Invalid PCP value for pcp action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		}
	}

	if (vlan_insert_action) {
		vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
		vtag_cfg->cfg_type = VTAG_TX;
		vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
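		/* vtag0 is laid out like an 802.1Q tag: TPID in bits 31:16,
		 * PCP in bits 15:13 and VID in bits 11:0 (DEI left clear).
		 */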
		vtag_cfg->tx.vtag0 =
			((vlan_ethtype << 16) | (vlan_pcp << 13) | vlan_id);
		vtag_cfg->tx.cfg_vtag0 = 1;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		tx_vtag_action.reg = 0;
		tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
		if (tx_vtag_action.act.vtag0_def < 0) {
			otx2_err("Failed to config TX VTAG action");
			return -EINVAL;
		}
		tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
		tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
		tx_vtag_action.act.vtag0_relptr =
			NIX_TX_VTAGACTION_VTAG0_RELPTR;
		flow->vtag_action = tx_vtag_action.reg;
	}
	return 0;
}

static struct rte_flow *
otx2_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_parse_state parse_state;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_flow *flow, *flow_iter;
	struct otx2_flow_list *list;
	int rc;

	flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Memory allocation failed");
		return NULL;
	}
	memset(flow, 0, sizeof(*flow));

	rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
			     &parse_state);
	if (rc != 0)
		goto err_exit;

	rc = flow_program_vtag_action(dev, actions, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to program vlan action");
		goto err_exit;
	}

	parse_state.is_vf = otx2_dev_is_vf(hw);

	rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to insert filter");
		goto err_exit;
	}

	rc = flow_program_rss_action(dev, actions, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to program rss action");
		goto err_exit;
	}

	if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		rc = flow_update_sec_tt(dev, actions);
		if (rc != 0) {
			rte_flow_error_set(error, EIO,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Failed to update tt with sec act");
			goto err_exit;
		}
	}

	list = &hw->npc_flow.flow_list[flow->priority];
	/* List in ascending order of mcam entries */
	TAILQ_FOREACH(flow_iter, list, next) {
		if (flow_iter->mcam_id > flow->mcam_id) {
			TAILQ_INSERT_BEFORE(flow_iter, flow, next);
			return flow;
		}
	}

	TAILQ_INSERT_TAIL(list, flow, next);
	return flow;

err_exit:
	rte_free(flow);
	return NULL;
}

static int
otx2_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_bitmap *bmap;
	uint16_t match_id;
	int rc;

	match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
		   NIX_RX_ACT_MATCH_MASK;

	if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
		if (rte_atomic32_read(&npc->mark_actions) == 0)
			return -EINVAL;

		/* Clear mark offload flag if there are no more mark actions */
		if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
			hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
			otx2_eth_set_rx_function(dev);
		}
	}

	if (flow->nix_intf == OTX2_INTF_RX && flow->vtag_action) {
		npc->vtag_actions--;
		if (npc->vtag_actions == 0) {
			if (hw->vlan_info.strip_on == 0) {
				hw->rx_offload_flags &=
					~NIX_RX_OFFLOAD_VLAN_STRIP_F;
				otx2_eth_set_rx_function(dev);
			}
		}
	}

	rc = flow_free_rss_action(dev, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to free rss action");
	}

	rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to destroy filter");
	}

	TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);

	bmap = npc->live_entries[flow->priority];
	rte_bitmap_clear(bmap, flow->mcam_id);

	rte_free(flow);
	return 0;
}

static int
otx2_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	int rc;

	rc = otx2_flow_free_all_resources(hw);
	if (rc) {
		otx2_err("Error when deleting NPC MCAM entries, counters");
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to flush filter");
		return -rte_errno;
	}

	return 0;
}

static int
otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
		  int enable __rte_unused,
		  struct rte_flow_error *error)
{
	/*
	 * If isolation were supported, the default MCAM entry for
	 * this port would have to be uninstalled here.
	 */

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "Flow isolation not supported");

	return -rte_errno;
}

static int
otx2_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *action,
		void *data,
		struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct rte_flow_query_count *query = data;
	struct otx2_mbox *mbox = hw->mbox;
	const char *errmsg = NULL;
	int errcode = ENOTSUP;
	int rc;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
		errmsg = "Only COUNT is supported in query";
		goto err_exit;
	}

	if (flow->ctr_id == NPC_COUNTER_NONE) {
		errmsg = "Counter is not available";
		goto err_exit;
	}

	rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
	if (rc != 0) {
		errcode = EIO;
		errmsg = "Error reading flow counter";
		goto err_exit;
	}
	query->hits_set = 1;
	query->bytes_set = 0;

	if (query->reset) {
		rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
		if (rc != 0) {
			errcode = EIO;
			errmsg = "Error clearing flow counter";
			goto err_exit;
		}
	}

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   errmsg);
	return -rte_errno;
}

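/* These callbacks are not called directly; applications reach them
 * through the generic rte_flow API. A typical sequence (sketch) is:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow create failed: %s\n", err.message);
 *	...
 *	rte_flow_destroy(port_id, f, &err);
 */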
const struct rte_flow_ops otx2_flow_ops = {
	.validate = otx2_flow_validate,
	.create = otx2_flow_create,
	.destroy = otx2_flow_destroy,
	.flush = otx2_flow_flush,
	.query = otx2_flow_query,
	.isolate = otx2_flow_isolate,
};

static int
flow_supp_key_len(uint32_t supp_mask)
{
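	/* Each set bit in supp_mask contributes one nibble (4 bits) to the
	 * MCAM key; count the bits with Kernighan's clear-lowest-bit trick.
	 */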
	int nib_count = 0;
	while (supp_mask) {
		nib_count++;
		supp_mask &= (supp_mask - 1);
	}
	return nib_count * 4;
}

/* Refer to the HRM registers:
 * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
 * and
 * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
 */
#define BYTESM1_SHIFT	16
#define HDR_OFF_SHIFT	8
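/* Layout of the extract config word decoded below: BYTESM1 in bits 19:16,
 * HDR_OFFSET in bits 15:8, ENA in bit 7, FLAGS_ENA in bit 6 and
 * KEY_OFFSET in bits 5:0.
 */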
static void
flow_update_kex_info(struct npc_xtract_info *xtract_info,
		     uint64_t val)
{
	xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
	xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
	xtract_info->key_off = val & 0x3f;
	xtract_info->enable = ((val >> 7) & 0x1);
	xtract_info->flags_enable = ((val >> 6) & 0x1);
}

static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
		      struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
		[NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	otx2_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info =
					&npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				flow_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
		(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					flow_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}

static struct otx2_idev_kex_cfg *
flow_intra_dev_kex_cfg(void)
{
	static const char name[] = "octeontx2_intra_device_kex_conf";
	struct otx2_idev_kex_cfg *idev;
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz->addr;

	/* Request for the first time */
	mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
					 SOCKET_ID_ANY, 0, OTX2_ALIGN);
	if (mz) {
		idev = mz->addr;
		rte_atomic16_set(&idev->kex_refcnt, 0);
		return idev;
	}
	return NULL;
}

static int
flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
{
	struct otx2_npc_flow_info *npc = &dev->npc_flow;
	struct npc_get_kex_cfg_rsp *kex_rsp;
	struct otx2_mbox *mbox = dev->mbox;
	char mkex_pfl_name[MKEX_NAME_LEN];
	struct otx2_idev_kex_cfg *idev;
	int rc = 0;

	idev = flow_intra_dev_kex_cfg();
	if (!idev)
		return -ENOMEM;

	/* Has the kex_cfg already been fetched by another device? */
	if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
		/* Call mailbox to get key & data size */
		(void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
		otx2_mbox_msg_send(mbox, 0);
		rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
		if (rc) {
			otx2_err("Failed to fetch NPC keyx config");
			goto done;
		}
		memcpy(&idev->kex_cfg, kex_rsp,
		       sizeof(struct npc_get_kex_cfg_rsp));
	}

	otx2_mbox_memcpy(mkex_pfl_name,
			 idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);

	strlcpy((char *)dev->mkex_pfl_name,
		mkex_pfl_name, sizeof(dev->mkex_pfl_name));

	flow_process_mkex_cfg(npc, &idev->kex_cfg);

done:
	return rc;
}

int
otx2_flow_init(struct otx2_eth_dev *hw)
{
	uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	uint32_t bmap_sz;
	int rc = 0, idx;

	rc = flow_fetch_kex_cfg(hw);
	if (rc) {
		otx2_err("Failed to fetch NPC keyx config from idev");
		return rc;
	}

	rte_atomic32_init(&npc->mark_actions);
	npc->vtag_actions = 0;

	npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
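	/* Wider MCAM keys (X2/X4, as encoded in keyw) consume more banks per
	 * entry, so the usable entry count shrinks by the key-width factor.
	 */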
	/* Free, free_rev, live and live_rev entries */
	bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
	mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
			  RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		return rc;
	}
	npc_mem = mem;

	npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
					   * sizeof(struct otx2_mcam_ents_info),
					   0);
	if (npc->flow_entry_info == NULL) {
		otx2_err("flow_entry_info alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->free_entries == NULL) {
		otx2_err("free_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->free_entries_rev == NULL) {
		otx2_err("free_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
					* sizeof(struct rte_bitmap *),
					0);
	if (npc->live_entries == NULL) {
		otx2_err("live_entries alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
					    * sizeof(struct rte_bitmap *),
					    0);
	if (npc->live_entries_rev == NULL) {
		otx2_err("live_entries_rev alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
				     * sizeof(struct otx2_flow_list),
				     0);
	if (npc->flow_list == NULL) {
		otx2_err("flow_list alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	for (idx = 0; idx < npc->flow_max_priority; idx++) {
		TAILQ_INIT(&npc->flow_list[idx]);

		npc->free_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->free_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->live_entries_rev[idx] =
			rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
		mem += bmap_sz;

		npc->flow_entry_info[idx].free_ent = 0;
		npc->flow_entry_info[idx].live_ent = 0;
		npc->flow_entry_info[idx].max_id = 0;
		npc->flow_entry_info[idx].min_id = ~(0);
	}

	npc->rss_grps = NIX_RSS_GRPS;

	bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
	nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
	if (nix_mem == NULL) {
		otx2_err("Bmap alloc failed");
		rc = -ENOMEM;
		goto err;
	}

	npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);

	/* Group 0 will be used for RSS,
	 * 1-7 will be used for the rte_flow RSS action
	 */
	rte_bitmap_set(npc->rss_grp_entries, 0);

	return 0;

err:
	if (npc->flow_list)
		rte_free(npc->flow_list);
	if (npc->live_entries_rev)
		rte_free(npc->live_entries_rev);
	if (npc->live_entries)
		rte_free(npc->live_entries);
	if (npc->free_entries_rev)
		rte_free(npc->free_entries_rev);
	if (npc->free_entries)
		rte_free(npc->free_entries);
	if (npc->flow_entry_info)
		rte_free(npc->flow_entry_info);
	if (npc_mem)
		rte_free(npc_mem);
	return rc;
}

int
otx2_flow_fini(struct otx2_eth_dev *hw)
{
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	int rc;

	rc = otx2_flow_free_all_resources(hw);
	if (rc) {
		otx2_err("Error when deleting NPC MCAM entries, counters");
		return rc;
	}

	if (npc->flow_list)
		rte_free(npc->flow_list);
	if (npc->live_entries_rev)
		rte_free(npc->live_entries_rev);
	if (npc->live_entries)
		rte_free(npc->live_entries);
	if (npc->free_entries_rev)
		rte_free(npc->free_entries_rev);
	if (npc->free_entries)
		rte_free(npc->free_entries);
	if (npc->flow_entry_info)
		rte_free(npc->flow_entry_info);

	return 0;
}