/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_flow.h"

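/* Allocate a single NPC MCAM counter via an AF mailbox request; the
 * allocated counter id is returned through 'ctr'.
 */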
static int
flow_mcam_alloc_counter(struct otx2_mbox *mbox, uint16_t *ctr)
{
	struct npc_mcam_alloc_counter_req *req;
	struct npc_mcam_alloc_counter_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
	req->count = 1;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc)
		return rc;

	*ctr = rsp->cntr_list[0];
	return 0;
}

int
otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id)
{
	struct npc_mcam_oper_counter_req *req;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox);
	req->cntr = ctr_id;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, NULL);

	return rc;
}

int
otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
			    uint64_t *count)
{
	struct npc_mcam_oper_counter_req *req;
	struct npc_mcam_oper_counter_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox);
	req->cntr = ctr_id;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc)
		return rc;

	*count = rsp->stat;
	return 0;
}

int
otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id)
{
	struct npc_mcam_oper_counter_req *req;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox);
	req->cntr = ctr_id;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, NULL);

	return rc;
}

int
otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry)
{
	struct npc_mcam_free_entry_req *req;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
	req->entry = entry;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, NULL);

	return rc;
}

int
otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox)
{
	struct npc_mcam_free_entry_req *req;
	int rc;

	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
	req->all = 1;
	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, NULL);

	return rc;
}

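/* Copy 'len' bytes of 'data' into 'ptr' in byte-reversed order, so the
 * destination holds the field with its last byte first. This is the
 * layout expected when programming MCAM search keys from the
 * mcam_data/mcam_mask arrays.
 */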
static void
flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
{
	int idx;

	for (idx = 0; idx < len; idx++)
		ptr[idx] = data[len - 1 - idx];
}

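/* Return 'len' if it fits within 'size' bytes, -1 otherwise. Used to
 * guard copies into the fixed-width MCAM key arrays.
 */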
static int
flow_check_copysz(size_t size, size_t len)
{
	if (len <= size)
		return len;
	return -1;
}

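/* Return 1 if the 'len' bytes at 'mem' are all zero, 0 otherwise. */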
static inline int
flow_mem_is_zero(const void *mem, int len)
{
	const char *m = mem;
	int i;

	for (i = 0; i < len; i++) {
		if (m[i] != 0)
			return 0;
	}
	return 1;
}

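/* Mark as matchable (0xff) the bytes of 'hw_mask' that this extraction
 * profile entry actually pulls out of the item header, clipped to the
 * item length described by 'info'.
 */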
static void
flow_set_hw_mask(struct otx2_flow_item_info *info,
		 struct npc_xtract_info *xinfo,
		 char *hw_mask)
{
	int max_off, offset;
	int j;

	if (xinfo->enable == 0)
		return;

	if (xinfo->hdr_off < info->hw_hdr_len)
		return;

	max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;

	if (max_off > info->len)
		max_off = info->len;

	offset = xinfo->hdr_off - info->hw_hdr_len;
	for (j = offset; j < max_off; j++)
		hw_mask[j] = 0xff;
}

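/* Build the hardware-supported mask for an item by OR-ing in every byte
 * that the extraction profile (LD entries, plus the flag-based LF
 * entries hanging off them) can extract from this layer type.
 */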
void
otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
			   struct otx2_flow_item_info *info, int lid, int lt)
{
	struct npc_xtract_info *xinfo, *lfinfo;
	char *hw_mask = info->hw_mask;
	int lf_cfg;
	int i, j;
	int intf;

	intf = pst->flow->nix_intf;
	xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
	memset(hw_mask, 0, info->len);

	for (i = 0; i < NPC_MAX_LD; i++)
		flow_set_hw_mask(info, &xinfo[i], hw_mask);

	for (i = 0; i < NPC_MAX_LD; i++) {
		if (xinfo[i].flags_enable == 0)
			continue;

		lf_cfg = pst->npc->prx_lfcfg[i].i;
		if (lf_cfg == lid) {
			for (j = 0; j < NPC_MAX_LFL; j++) {
				lfinfo = pst->npc->prx_fxcfg[intf]
					[i][j].xtract;
				flow_set_hw_mask(info, &lfinfo[0], hw_mask);
			}
		}
	}
}

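/* Fold one extraction profile entry into the MCAM key being built:
 * byte-reverse the relevant slice of the item's spec and mask and copy
 * them into mcam_data/mcam_mask at the entry's key offset. Fails if the
 * target key bytes were already claimed by a previous layer or if the
 * copy would overrun the key width.
 */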
static int
flow_update_extraction_data(struct otx2_parse_state *pst,
			    struct otx2_flow_item_info *info,
			    struct npc_xtract_info *xinfo)
{
	uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
	struct npc_xtract_info *x;
	int k, idx, hdr_off;
	int len = 0;

	x = xinfo;
	len = x->len;
	hdr_off = x->hdr_off;

	if (hdr_off < info->hw_hdr_len)
		return 0;

	if (x->enable == 0)
		return 0;

	otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d, "
		     "x->key_off = %d", x->hdr_off, len, info->len,
		     x->key_off);

	hdr_off -= info->hw_hdr_len;

	if (hdr_off + len > info->len)
		len = info->len - hdr_off;

	/* Check for over-write of previous layer */
	if (!flow_mem_is_zero(pst->mcam_mask + x->key_off,
			      len)) {
		/* Cannot support this data match */
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->pattern,
				   "Extraction unsupported");
		return -rte_errno;
	}

	len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8)
				- x->key_off,
				len);
	if (len < 0) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->pattern,
				   "Internal Error");
		return -rte_errno;
	}

	/* Need to reverse complete structure so that dest addr is at
	 * MSB so as to program the MCAM using mcam_data & mcam_mask
	 * arrays
	 */
	flow_prep_mcam_ldata(int_info,
			     (const uint8_t *)info->spec + hdr_off,
			     x->len);
	flow_prep_mcam_ldata(int_info_mask,
			     (const uint8_t *)info->mask + hdr_off,
			     x->len);

	otx2_npc_dbg("Spec: ");
	for (k = 0; k < info->len; k++)
		otx2_npc_dbg("0x%.2x ",
			     ((const uint8_t *)info->spec)[k]);

	otx2_npc_dbg("Int_info: ");
	for (k = 0; k < info->len; k++)
		otx2_npc_dbg("0x%.2x ", int_info[k]);

	memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
	memcpy(pst->mcam_data + x->key_off, int_info, len);

	otx2_npc_dbg("Parse state mcam data & mask");
	for (idx = 0; idx < len; idx++)
		otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx,
			     *(pst->mcam_data + idx + x->key_off), idx,
			     *(pst->mcam_mask + idx + x->key_off));
	return 0;
}

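/* Record a matched layer (lid/lt/flags) in the parse state and, when a
 * spec is present, program its extraction data into the MCAM key via
 * both the LD entries and any flag-based LF entries. Advances
 * pst->pattern so the next layer parser sees the following item.
 */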
int
otx2_flow_update_parse_state(struct otx2_parse_state *pst,
			     struct otx2_flow_item_info *info, int lid, int lt,
			     uint8_t flags)
{
	struct npc_lid_lt_xtract_info *xinfo;
	struct npc_xtract_info *lfinfo;
	int intf, lf_cfg;
	int i, j, rc = 0;

	otx2_npc_dbg("Parse state function info mask total %s",
		     (const uint8_t *)info->mask);

	pst->layer_mask |= lid;
	pst->lt[lid] = lt;
	pst->flags[lid] = flags;

	intf = pst->flow->nix_intf;
	xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
	otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating);
	if (xinfo->is_terminating)
		pst->terminate = 1;

	if (info->spec == NULL) {
		otx2_npc_dbg("Info spec NULL");
		goto done;
	}

	for (i = 0; i < NPC_MAX_LD; i++) {
		rc = flow_update_extraction_data(pst, info, &xinfo->xtract[i]);
		if (rc != 0)
			return rc;
	}

	for (i = 0; i < NPC_MAX_LD; i++) {
		if (xinfo->xtract[i].flags_enable == 0)
			continue;

		lf_cfg = pst->npc->prx_lfcfg[i].i;
		if (lf_cfg == lid) {
			for (j = 0; j < NPC_MAX_LFL; j++) {
				lfinfo = pst->npc->prx_fxcfg[intf]
					[i][j].xtract;
				rc = flow_update_extraction_data(pst, info,
								 &lfinfo[0]);
				if (rc != 0)
					return rc;

				if (lfinfo[0].enable)
					pst->flags[lid] = j;
			}
		}
	}

done:
	/* Next pattern to parse by subsequent layers */
	pst->pattern++;
	return 0;
}

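/* Validate a spec/last/mask triple: under the given mask, every byte of
 * 'last' must equal the corresponding byte of 'spec' (or 'last' must be
 * zero), since the hardware cannot match non-contiguous ranges.
 */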
static inline int
flow_range_is_valid(const char *spec, const char *last, const char *mask,
		    int len)
{
	/* 'last' must be zero or equal to spec under the mask, as we do
	 * not support non-contiguous ranges.
	 */
	while (len--) {
		if (last[len] &&
		    (spec[len] & mask[len]) != (last[len] & mask[len]))
			return 0; /* False */
	}
	return 1;
}

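/* Check that the user mask is a subset of the hardware-supported mask,
 * i.e. mask | hw_mask == hw_mask for every byte.
 */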
static inline int
flow_mask_is_supported(const char *mask, const char *hw_mask, int len)
{
	/*
	 * If no hw_mask, assume nothing is supported.
	 * mask is never NULL.
	 */
	if (hw_mask == NULL)
		return flow_mem_is_zero(mask, len);

	while (len--) {
		if ((mask[len] | hw_mask[len]) != hw_mask[len])
			return 0; /* False */
	}
	return 1;
}

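/* Common validation for a single rte_flow item: resolve spec/mask/last
 * into 'info', apply the default mask when none is given, and reject
 * masks or ranges the hardware cannot express.
 */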
int
otx2_flow_parse_item_basic(const struct rte_flow_item *item,
			   struct otx2_flow_item_info *info,
			   struct rte_flow_error *error)
{
	/* Item must not be NULL */
	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Item is NULL");
		return -rte_errno;
	}
	/* If spec is NULL, both mask and last must be NULL; this makes
	 * the item match ANY value (equivalent to mask = 0).
	 * Setting either mask or last without spec is an error.
	 */
	if (item->spec == NULL) {
		if (item->last == NULL && item->mask == NULL) {
			info->spec = NULL;
			return 0;
		}
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "mask or last set without spec");
		return -rte_errno;
	}

	/* We have a valid spec */
	info->spec = item->spec;

	/* If mask is not set, use the default mask; error if the default
	 * mask is also NULL.
	 */
	if (item->mask == NULL) {
		otx2_npc_dbg("Item mask null, using default mask");
		if (info->def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "No mask or default mask given");
			return -rte_errno;
		}
		info->mask = info->def_mask;
	} else {
		info->mask = item->mask;
	}

	/* The specified mask must be a subset of the hw supported mask:
	 * mask | hw_mask == hw_mask
	 */
	if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				   item, "Unsupported field in the mask");
		return -rte_errno;
	}

	/* Now we have spec and mask. OTX2 does not support non-contiguous
	 * ranges. We should have either:
	 * - spec & mask == last & mask, or
	 * - last == 0, or
	 * - last == NULL
	 */
	if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) {
		if (!flow_range_is_valid(item->spec, item->last, info->mask,
					 info->len)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Unsupported range for match");
			return -rte_errno;
		}
	}

	return 0;
}

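/* Compact the MCAM key (two 64-bit words) in place: keep only the
 * nibbles whose bits are set in 'nibble_mask', packing them towards the
 * least significant end of data[0]/data[1].
 */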
void
otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask)
{
	uint64_t cdata[2] = {0ULL, 0ULL}, nibble;
	int i, j = 0;

	for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) {
		if (nibble_mask & (1 << i)) {
			nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf;
			cdata[j / 16] |= (nibble << ((j & 0xf) * 4));
			j += 1;
		}
	}

	data[0] = cdata[0];
	data[1] = cdata[1];
}

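/* Return the bit position of the least significant set bit in 'slab'
 * via a binary search over halves; valid only for non-zero slabs.
 */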
static int
flow_first_set_bit(uint64_t slab)
{
	int num = 0;

	if ((slab & 0xffffffff) == 0) {
		num += 32;
		slab >>= 32;
	}
	if ((slab & 0xffff) == 0) {
		num += 16;
		slab >>= 16;
	}
	if ((slab & 0xff) == 0) {
		num += 8;
		slab >>= 8;
	}
	if ((slab & 0xf) == 0) {
		num += 4;
		slab >>= 4;
	}
	if ((slab & 0x3) == 0) {
		num += 2;
		slab >>= 2;
	}
	if ((slab & 0x1) == 0)
		num += 1;

	return num;
}

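/* Move a live MCAM entry from 'old_ent' to 'new_ent' in hardware via
 * the shift-entry mailbox message, then fix up the priority-level flow
 * list so that it stays sorted by mcam id.
 */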
static int
flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
		  struct otx2_npc_flow_info *flow_info,
		  uint32_t old_ent, uint32_t new_ent)
{
	struct npc_mcam_shift_entry_req *req;
	struct npc_mcam_shift_entry_rsp *rsp;
	struct otx2_flow_list *list;
	struct rte_flow *flow_iter;
	int rc = 0;

	otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent,
		     flow->priority);

	list = &flow_info->flow_list[flow->priority];

	/* The old entry is disabled and its contents are moved to
	 * new_entry; the new entry is enabled last.
	 */
	req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox);
	req->curr_entry[0] = old_ent;
	req->new_entry[0] = new_ent;
	req->shift_count = 1;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc)
		return rc;

	/* Remove the old node from the list; mcam ids are unique within
	 * a list, so stop at the first match.
	 */
	TAILQ_FOREACH(flow_iter, list, next) {
		if (flow_iter->mcam_id == old_ent) {
			TAILQ_REMOVE(list, flow_iter, next);
			break;
		}
	}

	/* Insert the node with the new mcam id at the right place:
	 * only once, before the first entry with a higher mcam id.
	 */
	TAILQ_FOREACH(flow_iter, list, next) {
		if (flow_iter->mcam_id > new_ent) {
			TAILQ_INSERT_BEFORE(flow_iter, flow, next);
			break;
		}
	}
	return rc;
}

/* Exchange all required entries within a given priority level */
static int
flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
	       struct otx2_npc_flow_info *flow_info,
	       struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl)
{
	struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp;
	/* free & live entry index */
	uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries;
	/* Bit position within the slab */
	uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0;
	/* Overall bit position of the start of the slab */
	uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0;
	int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0;
	struct otx2_mcam_ents_info *ent_info;
	/* free & live bitmap slab */
	uint64_t sl_fr = 0, sl_lv = 0, *sl;

	fr_bmp = flow_info->free_entries[prio_lvl];
	fr_bmp_rev = flow_info->free_entries_rev[prio_lvl];
	lv_bmp = flow_info->live_entries[prio_lvl];
	lv_bmp_rev = flow_info->live_entries_rev[prio_lvl];
	ent_info = &flow_info->flow_entry_info[prio_lvl];
	mcam_entries = flow_info->mcam_entries;

	/* New entries allocated are always contiguous, but older entries
	 * already in the free/live bitmaps can be non-contiguous, so the
	 * shifted entries are returned in non-contiguous format (via
	 * rsp->entry_list).
	 */
	while (idx <= rsp->count) {
		if (!sl_fr && !sl_lv) {
			/* Lower index elements to be exchanged */
			if (dir < 0) {
				rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr);
				rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv);
				otx2_npc_dbg("Fwd slab rc fr %u rc lv %u "
					     "e_fr %u e_lv %u", rc_fr, rc_lv,
					      e_fr, e_lv);
			} else {
				rc_fr = rte_bitmap_scan(fr_bmp_rev,
							&sl_fr_bit_off,
							&sl_fr);
				rc_lv = rte_bitmap_scan(lv_bmp_rev,
							&sl_lv_bit_off,
							&sl_lv);

				otx2_npc_dbg("Rev slab rc fr %u rc lv %u "
					     "e_fr %u e_lv %u", rc_fr, rc_lv,
					      e_fr, e_lv);
			}
		}

		if (rc_fr) {
			fr_bit_pos = flow_first_set_bit(sl_fr);
			e_fr = sl_fr_bit_off + fr_bit_pos;
			otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos);
		} else {
			e_fr = ~(0);
		}

		if (rc_lv) {
			lv_bit_pos = flow_first_set_bit(sl_lv);
			e_lv = sl_lv_bit_off + lv_bit_pos;
			otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos);
		} else {
			e_lv = ~(0);
		}

		/* First entry is from free_bmap */
		if (e_fr < e_lv) {
			bmp = fr_bmp;
			e = e_fr;
			sl = &sl_fr;
			bit_pos = fr_bit_pos;
			if (dir > 0)
				e_id = mcam_entries - e - 1;
			else
				e_id = e;
			otx2_npc_dbg("Fr e %u e_id %u", e, e_id);
		} else {
			bmp = lv_bmp;
			e = e_lv;
			sl = &sl_lv;
			bit_pos = lv_bit_pos;
			if (dir > 0)
				e_id = mcam_entries - e - 1;
			else
				e_id = e;

			otx2_npc_dbg("Lv e %u e_id %u", e, e_id);
			if (idx < rsp->count)
				rc = flow_shift_lv_ent(mbox, flow,
						       flow_info, e_id,
						       rsp->entry + idx);
		}

		rte_bitmap_clear(bmp, e);
		rte_bitmap_set(bmp, rsp->entry + idx);
		/* Update the entry list; use the non-contiguous list from
		 * now on.
		 */
		rsp->entry_list[idx] = e_id;
		/* Use a 64-bit constant: bit_pos may exceed 31 */
		*sl &= ~(1ULL << bit_pos);

		/* Update min & max entry identifiers in current
		 * priority level.
		 */
		if (dir < 0) {
			ent_info->max_id = rsp->entry + idx;
			ent_info->min_id = e_id;
		} else {
			ent_info->max_id = e_id;
			ent_info->min_id = rsp->entry;
		}

		idx++;
	}
	return rc;
}

/* Validate whether newly allocated entries lie in the correct priority
 * zone, since NPC_MCAM_LOWER_PRIO & NPC_MCAM_HIGHER_PRIO don't ensure
 * zone accuracy. If not properly aligned, shift entries into place.
 */
static int
flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
				 struct otx2_npc_flow_info *flow_info,
				 struct npc_mcam_alloc_entry_rsp *rsp,
				 int req_prio)
{
	int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority;
	struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
	int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1;
	uint32_t tot_ent = 0;

	otx2_npc_dbg("Dir %d, priority = %d", dir, prio);

	if (dir < 0)
		prio_idx = flow_info->flow_max_priority - 1;

	/* Only live entries need to be shifted; free entries can just be
	 * moved by bit manipulation.
	 */

	/* For dir = -1 (NPC_MCAM_LOWER_PRIO), when shifting,
	 * NPC_MAX_PREALLOC_ENT entries are exchanged with adjoining
	 * higher priority level entries (lower indexes).
	 *
	 * For dir = +1 (NPC_MCAM_HIGHER_PRIO), when shifting,
	 * NPC_MAX_PREALLOC_ENT entries are exchanged with adjoining
	 * lower priority level entries (higher indexes), starting from
	 * the highest indexes.
	 */
	do {
		tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent;

		if (dir < 0 && prio_idx != prio &&
		    rsp->entry > info[prio_idx].max_id && tot_ent) {
			otx2_npc_dbg("Rsp entry %u prio idx %u "
				     "max id %u", rsp->entry, prio_idx,
				      info[prio_idx].max_id);

			needs_shift = 1;
		} else if ((dir > 0) && (prio_idx != prio) &&
		     (rsp->entry < info[prio_idx].min_id) && tot_ent) {
			otx2_npc_dbg("Rsp entry %u prio idx %u "
				     "min id %u", rsp->entry, prio_idx,
				      info[prio_idx].min_id);
			needs_shift = 1;
		}

		otx2_npc_dbg("Needs_shift = %d", needs_shift);
		if (needs_shift) {
			needs_shift = 0;
			rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir,
					    prio_idx);
		} else {
			for (idx = 0; idx < rsp->count; idx++)
				rsp->entry_list[idx] = rsp->entry + idx;
		}
	} while ((prio_idx != prio) && (prio_idx += dir));

	return rc;
}

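/* Find a reference MCAM entry for a new allocation: search nearby
 * priority levels for one with live entries and return its boundary
 * entry id, setting *prio to HIGHER/LOWER accordingly. Falls back to
 * NPC_MCAM_ANY_PRIO when no other level has live entries.
 */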
static int
flow_find_ref_entry(struct otx2_npc_flow_info *flow_info, int *prio,
		    int prio_lvl)
{
	struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
	int step = 1;

	while (step < flow_info->flow_max_priority) {
		if (((prio_lvl + step) < flow_info->flow_max_priority) &&
		    info[prio_lvl + step].live_ent) {
			*prio = NPC_MCAM_HIGHER_PRIO;
			return info[prio_lvl + step].min_id;
		}

		if (((prio_lvl - step) >= 0) &&
		    info[prio_lvl - step].live_ent) {
			otx2_npc_dbg("Prio_lvl %u live %u", prio_lvl - step,
				     info[prio_lvl - step].live_ent);
			*prio = NPC_MCAM_LOWER_PRIO;
			return info[prio_lvl - step].max_id;
		}
		step++;
	}
	*prio = NPC_MCAM_ANY_PRIO;
	return 0;
}

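/* Refill the per-priority preallocated entry cache: allocate a
 * contiguous batch of MCAM entries relative to a reference entry, shift
 * them into the right priority zone if needed, add all but one to the
 * free bitmaps, and hand that remaining entry back through 'free_ent'
 * as the live entry for the current flow.
 */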
static int
flow_fill_entry_cache(struct otx2_mbox *mbox, struct rte_flow *flow,
		      struct otx2_npc_flow_info *flow_info, uint32_t *free_ent)
{
	struct rte_bitmap *free_bmp, *free_bmp_rev, *live_bmp, *live_bmp_rev;
	struct npc_mcam_alloc_entry_rsp rsp_local;
	struct npc_mcam_alloc_entry_rsp *rsp_cmd;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_mcam_ents_info *info;
	uint16_t ref_ent, idx;
	int rc, prio;

	info = &flow_info->flow_entry_info[flow->priority];
	free_bmp = flow_info->free_entries[flow->priority];
	free_bmp_rev = flow_info->free_entries_rev[flow->priority];
	live_bmp = flow_info->live_entries[flow->priority];
	live_bmp_rev = flow_info->live_entries_rev[flow->priority];

	ref_ent = flow_find_ref_entry(flow_info, &prio, flow->priority);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
	req->contig = 1;
	req->count = flow_info->flow_prealloc_size;
	req->priority = prio;
	req->ref_entry = ref_ent;

	otx2_npc_dbg("Fill cache ref entry %u prio %u", ref_ent, prio);

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp_cmd);
	if (rc)
		return rc;

	rsp = &rsp_local;
	memcpy(rsp, rsp_cmd, sizeof(*rsp));

	otx2_npc_dbg("Alloc entry %u count %u, prio = %d", rsp->entry,
		     rsp->count, prio);

	/* Non-first ent cache fill */
	if (prio != NPC_MCAM_ANY_PRIO) {
		flow_validate_and_shift_prio_ent(mbox, flow, flow_info, rsp,
						 prio);
	} else {
		/* Copy into response entry list */
		for (idx = 0; idx < rsp->count; idx++)
			rsp->entry_list[idx] = rsp->entry + idx;
	}

	otx2_npc_dbg("Fill entry cache rsp count %u", rsp->count);
	/* Update free entries, reverse free entries list,
	 * min & max entry ids.
	 */
	for (idx = 0; idx < rsp->count; idx++) {
		if (unlikely(rsp->entry_list[idx] < info->min_id))
			info->min_id = rsp->entry_list[idx];

		if (unlikely(rsp->entry_list[idx] > info->max_id))
			info->max_id = rsp->entry_list[idx];

		/* Skip the entry to be returned; it is not part of the
		 * free list.
		 */
		if (prio == NPC_MCAM_HIGHER_PRIO) {
			if (unlikely(idx == (rsp->count - 1))) {
				*free_ent = rsp->entry_list[idx];
				continue;
			}
		} else {
			if (unlikely(!idx)) {
				*free_ent = rsp->entry_list[idx];
				continue;
			}
		}
		info->free_ent++;
		rte_bitmap_set(free_bmp, rsp->entry_list[idx]);
		rte_bitmap_set(free_bmp_rev, flow_info->mcam_entries -
			       rsp->entry_list[idx] - 1);

		otx2_npc_dbg("Final rsp entry %u rsp entry rev %u",
			     rsp->entry_list[idx],
			     flow_info->mcam_entries -
			     rsp->entry_list[idx] - 1);
	}

	otx2_npc_dbg("Cache free entry %u, rev = %u", *free_ent,
		     flow_info->mcam_entries - *free_ent - 1);
	info->live_ent++;
	rte_bitmap_set(live_bmp, *free_ent);
	rte_bitmap_set(live_bmp_rev, flow_info->mcam_entries - *free_ent - 1);

	return 0;
}

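/* Return a free MCAM entry for the flow's priority level: take one from
 * the preallocated cache when available, otherwise refill the cache.
 * On success the entry is moved from the free to the live bitmaps.
 */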
static int
flow_check_preallocated_entry_cache(struct otx2_mbox *mbox,
				    struct rte_flow *flow,
				    struct otx2_npc_flow_info *flow_info)
{
	struct rte_bitmap *free, *free_rev, *live, *live_rev;
	uint32_t pos = 0, free_ent = 0, mcam_entries;
	struct otx2_mcam_ents_info *info;
	uint64_t slab = 0;
	int rc;

	otx2_npc_dbg("Flow priority %u", flow->priority);

	info = &flow_info->flow_entry_info[flow->priority];

	free_rev = flow_info->free_entries_rev[flow->priority];
	free = flow_info->free_entries[flow->priority];
	live_rev = flow_info->live_entries_rev[flow->priority];
	live = flow_info->live_entries[flow->priority];
	mcam_entries = flow_info->mcam_entries;

	if (info->free_ent) {
		rc = rte_bitmap_scan(free, &pos, &slab);
		if (rc) {
			/* Get free_ent from the free entry bitmap */
			free_ent = pos + __builtin_ctzll(slab);
			otx2_npc_dbg("Allocated from cache entry %u", free_ent);
			/* Remove from free bitmaps and add to live ones */
			rte_bitmap_clear(free, free_ent);
			rte_bitmap_set(live, free_ent);
			rte_bitmap_clear(free_rev,
					 mcam_entries - free_ent - 1);
			rte_bitmap_set(live_rev,
				       mcam_entries - free_ent - 1);

			info->free_ent--;
			info->live_ent++;
			return free_ent;
		}

		otx2_npc_dbg("No free entry found despite non-zero free count");
		return -1;
	}

	rc = flow_fill_entry_cache(mbox, flow, flow_info, &free_ent);
	if (rc)
		return rc;

	return free_ent;
}

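/* Allocate an MCAM entry (and optionally a counter) for the flow and
 * write the rule: merge in the VF base steering rule when applicable,
 * fold the RX channel or TX pf_func into key word 0, and enable the
 * entry. On success the flow records its mcam_id and counter id.
 */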
int
otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox,
			       struct otx2_parse_state *pst,
			       struct otx2_npc_flow_info *flow_info)
{
	int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
	struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
	struct npc_mcam_write_entry_req *req;
	struct mcam_entry *base_entry;
	struct mbox_msghdr *rsp;
	uint16_t ctr = ~(0);
	int rc, idx;
	int entry;

	if (use_ctr) {
		rc = flow_mcam_alloc_counter(mbox, &ctr);
		if (rc)
			return rc;
	}

	entry = flow_check_preallocated_entry_cache(mbox, flow, flow_info);
	if (entry < 0) {
		otx2_err("Prealloc failed");
		if (use_ctr)
			otx2_flow_mcam_free_counter(mbox, ctr);
		return NPC_MCAM_ALLOC_FAILED;
	}

	if (pst->is_vf) {
		(void)otx2_mbox_alloc_msg_npc_read_base_steer_rule(mbox);
		rc = otx2_mbox_process_msg(mbox, (void *)&base_rule_rsp);
		if (rc) {
			otx2_err("Failed to fetch VF's base MCAM entry");
			return rc;
		}
		base_entry = &base_rule_rsp->entry_data;
		for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
			flow->mcam_data[idx] |= base_entry->kw[idx];
			flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
		}
	}

	req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
	req->set_cntr = use_ctr;
	req->cntr = ctr;
	req->entry = entry;
	otx2_npc_dbg("Alloc & write entry %u", entry);

	req->intf =
		(flow->nix_intf == OTX2_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
	req->enable_entry = 1;
	req->entry_data.action = flow->npc_action;
	req->entry_data.vtag_action = flow->vtag_action;

	for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
		req->entry_data.kw[idx] = flow->mcam_data[idx];
		req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
	}

	if (flow->nix_intf == OTX2_INTF_RX) {
		/* Match the RX channel in the low 12 bits of key word 0 */
		req->entry_data.kw[0] |= flow_info->channel;
		req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
	} else {
		uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;

		pf_func = htons(pf_func);
		req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
		req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
	if (rc != 0)
		return rc;

	flow->mcam_id = entry;
	if (use_ctr)
		flow->ctr_id = ctr;
	return 0;
}