xref: /f-stack/dpdk/drivers/net/ice/base/ice_acl_ctrl.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "ice_acl.h"
6 #include "ice_flow.h"
7 
/* Determine the TCAM index of entry 'e' within the ACL table */
#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH)

/* Determine the entry index within the TCAM */
#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH)

/* Sentinel returned when no free scenario entry index can be allocated */
#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
15 
16 /**
17  * ice_acl_init_entry
18  * @scen: pointer to the scenario struct
19  *
20  * Initialize the scenario control structure.
21  */
ice_acl_init_entry(struct ice_acl_scen * scen)22 static void ice_acl_init_entry(struct ice_acl_scen *scen)
23 {
24 	/* low priority: start from the highest index, 25% of total entries
25 	 * normal priority: start from the highest index, 50% of total entries
26 	 * high priority: start from the lowest index, 25% of total entries
27 	 */
28 	scen->first_idx[ICE_ACL_PRIO_LOW] = scen->num_entry - 1;
29 	scen->first_idx[ICE_ACL_PRIO_NORMAL] = scen->num_entry -
30 		scen->num_entry / 4 - 1;
31 	scen->first_idx[ICE_ACL_PRIO_HIGH] = 0;
32 
33 	scen->last_idx[ICE_ACL_PRIO_LOW] = scen->num_entry -
34 		scen->num_entry / 4;
35 	scen->last_idx[ICE_ACL_PRIO_NORMAL] = scen->num_entry / 4;
36 	scen->last_idx[ICE_ACL_PRIO_HIGH] = scen->num_entry / 4 - 1;
37 }
38 
/**
 * ice_acl_scen_assign_entry_idx
 * @scen: pointer to the scenario struct
 * @prio: the priority of the flow entry being allocated
 *
 * To find the index of an available entry in scenario
 *
 * Returns ICE_ACL_SCEN_ENTRY_INVAL if fails
 * Returns index on success
 */
static u16
ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen,
			      enum ice_acl_entry_prio prio)
{
	u16 first_idx, last_idx, i;
	s8 step;

	if (prio >= ICE_ACL_MAX_PRIO)
		return ICE_ACL_SCEN_ENTRY_INVAL;

	/* Scan the index window [first_idx .. last_idx] set up by
	 * ice_acl_init_entry(): upward when first_idx <= last_idx,
	 * downward otherwise.
	 */
	first_idx = scen->first_idx[prio];
	last_idx = scen->last_idx[prio];
	step = first_idx <= last_idx ? 1 : -1;

	/* Claim the first index whose bitmap bit was clear; test-and-set
	 * makes the check and the claim a single operation. The loop ends
	 * one step past last_idx.
	 */
	for (i = first_idx; i != last_idx + step; i += step)
		if (!ice_test_and_set_bit(i, scen->entry_bitmap))
			return i;

	return ICE_ACL_SCEN_ENTRY_INVAL;
}
69 
70 /**
71  * ice_acl_scen_free_entry_idx
72  * @scen: pointer to the scenario struct
73  * @idx: the index of the flow entry being de-allocated
74  *
75  * To mark an entry available in scenario
76  */
77 static enum ice_status
ice_acl_scen_free_entry_idx(struct ice_acl_scen * scen,u16 idx)78 ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx)
79 {
80 	if (idx >= scen->num_entry)
81 		return ICE_ERR_MAX_LIMIT;
82 
83 	if (!ice_test_and_clear_bit(idx, scen->entry_bitmap))
84 		return ICE_ERR_DOES_NOT_EXIST;
85 
86 	return ICE_SUCCESS;
87 }
88 
89 /**
90  * ice_acl_tbl_calc_end_idx
91  * @start: start index of the TCAM entry of this partition
92  * @num_entries: number of entries in this partition
93  * @width: width of a partition in number of TCAMs
94  *
95  * Calculate the end entry index for a partition with starting entry index
96  * 'start', entries 'num_entries', and width 'width'.
97  */
ice_acl_tbl_calc_end_idx(u16 start,u16 num_entries,u16 width)98 static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width)
99 {
100 	u16 end_idx, add_entries = 0;
101 
102 	end_idx = start + (num_entries - 1);
103 
104 	/* In case that our ACL partition requires cascading TCAMs */
105 	if (width > 1) {
106 		u16 num_stack_level;
107 
108 		/* Figure out the TCAM stacked level in this ACL scenario */
109 		num_stack_level = (start % ICE_AQC_ACL_TCAM_DEPTH) +
110 			num_entries;
111 		num_stack_level = DIVIDE_AND_ROUND_UP(num_stack_level,
112 						      ICE_AQC_ACL_TCAM_DEPTH);
113 
114 		/* In this case, each entries in our ACL partition span
115 		 * multiple TCAMs. Thus, we will need to add
116 		 * ((width - 1) * num_stack_level) TCAM's entries to
117 		 * end_idx.
118 		 *
119 		 * For example : In our case, our scenario is 2x2:
120 		 *	[TCAM 0]	[TCAM 1]
121 		 *	[TCAM 2]	[TCAM 3]
122 		 * Assuming that a TCAM will have 512 entries. If "start"
123 		 * is 500, "num_entries" is 3 and "width" = 2, then end_idx
124 		 * should be 1024 (belongs to TCAM 2).
125 		 * Before going to this if statement, end_idx will have the
126 		 * value of 512. If "width" is 1, then the final value of
127 		 * end_idx is 512. However, in our case, width is 2, then we
128 		 * will need add (2 - 1) * 1 * 512. As result, end_idx will
129 		 * have the value of 1024.
130 		 */
131 		add_entries = (width - 1) * num_stack_level *
132 			ICE_AQC_ACL_TCAM_DEPTH;
133 	}
134 
135 	return end_idx + add_entries;
136 }
137 
/**
 * ice_acl_init_tbl
 * @hw: pointer to the hardware structure
 *
 * Initialize the ACL table by invalidating TCAM entries and action pairs.
 */
static enum ice_status ice_acl_init_tbl(struct ice_hw *hw)
{
	struct ice_aqc_actpair act_buf;
	struct ice_aqc_acl_data buf;
	enum ice_status status = ICE_SUCCESS;
	struct ice_acl_tbl *tbl;
	u8 tcam_idx, i;
	u16 idx;

	tbl = hw->acl_tbl;
	if (!tbl)
		return ICE_ERR_CFG;

	/* Zeroed buffers are programmed into every entry to invalidate it */
	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
	ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);

	/* Walk every (TCAM, entry) pair from (first_tcam, first_entry)
	 * through (last_tcam, last_entry) inclusive.
	 */
	tcam_idx = tbl->first_tcam;
	idx = tbl->first_entry;
	while (tcam_idx < tbl->last_tcam ||
	       (tcam_idx == tbl->last_tcam && idx <= tbl->last_entry)) {
		/* Use the same value for entry_key and entry_key_inv since
		 * we are initializing the fields to 0
		 */
		status = ice_aq_program_acl_entry(hw, tcam_idx, idx, &buf,
						  NULL);
		if (status)
			return status;

		/* Advance to the next entry; move to the next TCAM once the
		 * current TCAM's entry range is exhausted.
		 */
		if (++idx > tbl->last_entry) {
			tcam_idx++;
			idx = tbl->first_entry;
		}
	}

	for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
		u16 act_entry_idx, start, end;

		/* Skip action memory banks not allocated to this table */
		if (tbl->act_mems[i].act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
			continue;

		start = tbl->first_entry;
		end = tbl->last_entry;

		for (act_entry_idx = start; act_entry_idx <= end;
		     act_entry_idx++) {
			/* Invalidate all allocated action pairs */
			status = ice_aq_program_actpair(hw, i, act_entry_idx,
							&act_buf, NULL);
			if (status)
				return status;
		}
	}

	return status;
}
199 
200 /**
201  * ice_acl_assign_act_mems_to_tcam
202  * @tbl: pointer to ACL table structure
203  * @cur_tcam: Index of current TCAM. Value = 0 to (ICE_AQC_ACL_SLICES - 1)
204  * @cur_mem_idx: Index of current action memory bank. Value = 0 to
205  *		 (ICE_AQC_MAX_ACTION_MEMORIES - 1)
206  * @num_mem: Number of action memory banks for this TCAM
207  *
208  * Assign "num_mem" valid action memory banks from "curr_mem_idx" to
209  * "curr_tcam" TCAM.
210  */
211 static void
ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl * tbl,u8 cur_tcam,u8 * cur_mem_idx,u8 num_mem)212 ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
213 				u8 *cur_mem_idx, u8 num_mem)
214 {
215 	u8 mem_cnt;
216 
217 	for (mem_cnt = 0;
218 	     *cur_mem_idx < ICE_AQC_MAX_ACTION_MEMORIES && mem_cnt < num_mem;
219 	     (*cur_mem_idx)++) {
220 		struct ice_acl_act_mem *p_mem = &tbl->act_mems[*cur_mem_idx];
221 
222 		if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL)
223 			continue;
224 
225 		p_mem->member_of_tcam = cur_tcam;
226 
227 		mem_cnt++;
228 	}
229 }
230 
231 /**
232  * ice_acl_divide_act_mems_to_tcams
233  * @tbl: pointer to ACL table structure
234  *
235  * Figure out how to divide given action memory banks to given TCAMs. This
236  * division is for SW book keeping. In the time when scenario is created,
237  * an action memory bank can be used for different TCAM.
238  *
239  * For example, given that we have 2x2 ACL table with each table entry has
240  * 2 action memory pairs. As the result, we will have 4 TCAMs (T1,T2,T3,T4)
241  * and 4 action memory banks (A1,A2,A3,A4)
242  *	[T1 - T2] { A1 - A2 }
243  *	[T3 - T4] { A3 - A4 }
244  * In the time when we need to create a scenario, for example, 2x1 scenario,
245  * we will use [T3,T4] in a cascaded layout. As it is a requirement that all
246  * action memory banks in a cascaded TCAM's row will need to associate with
247  * the last TCAM. Thus, we will associate action memory banks [A3] and [A4]
248  * for TCAM [T4].
249  * For SW book-keeping purpose, we will keep theoretical maps between TCAM
250  * [Tn] to action memory bank [An].
251  */
ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl * tbl)252 static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
253 {
254 	u16 num_cscd, stack_level, stack_idx, min_act_mem;
255 	u8 tcam_idx = tbl->first_tcam;
256 	u16 max_idx_to_get_extra;
257 	u8 mem_idx = 0;
258 
259 	/* Determine number of stacked TCAMs */
260 	stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth,
261 					  ICE_AQC_ACL_TCAM_DEPTH);
262 
263 	/* Determine number of cascaded TCAMs */
264 	num_cscd = DIVIDE_AND_ROUND_UP(tbl->info.width,
265 				       ICE_AQC_ACL_KEY_WIDTH_BYTES);
266 
267 	/* In a line of cascaded TCAM, given the number of action memory
268 	 * banks per ACL table entry, we want to fairly divide these action
269 	 * memory banks between these TCAMs.
270 	 *
271 	 * For example, there are 3 TCAMs (TCAM 3,4,5) in a line of
272 	 * cascaded TCAM, and there are 7 act_mems for each ACL table entry.
273 	 * The result is:
274 	 *	[TCAM_3 will have 3 act_mems]
275 	 *	[TCAM_4 will have 2 act_mems]
276 	 *	[TCAM_5 will have 2 act_mems]
277 	 */
278 	min_act_mem = tbl->info.entry_act_pairs / num_cscd;
279 	max_idx_to_get_extra = tbl->info.entry_act_pairs % num_cscd;
280 
281 	for (stack_idx = 0; stack_idx < stack_level; stack_idx++) {
282 		u16 i;
283 
284 		for (i = 0; i < num_cscd; i++) {
285 			u8 total_act_mem = min_act_mem;
286 
287 			if (i < max_idx_to_get_extra)
288 				total_act_mem++;
289 
290 			ice_acl_assign_act_mems_to_tcam(tbl, tcam_idx,
291 							&mem_idx,
292 							total_act_mem);
293 
294 			tcam_idx++;
295 		}
296 	}
297 }
298 
/**
 * ice_acl_create_tbl
 * @hw: pointer to the HW struct
 * @params: parameters for the table to be created
 *
 * Create a LEM table for ACL usage. We are currently starting with some fixed
 * values for the size of the table, but this will need to grow as more flow
 * entries are added by the user level.
 */
enum ice_status
ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
{
	u16 width, depth, first_e, last_e, i;
	struct ice_aqc_acl_generic *resp_buf;
	struct ice_acl_alloc_tbl tbl_alloc;
	struct ice_acl_tbl *tbl;
	enum ice_status status;

	/* Only a single ACL table is tracked per HW instance */
	if (hw->acl_tbl)
		return ICE_ERR_ALREADY_EXISTS;

	if (!params)
		return ICE_ERR_PARAM;

	/* round up the width to the next TCAM width boundary. */
	width = ROUND_UP(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES);
	/* depth should be provided in chunk (64 entry) increments */
	depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);

	/* Guarantee at least one action pair per cascaded TCAM, capped at
	 * the per-entry hardware maximum.
	 */
	if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
		params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES;

		if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
			params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
	}

	/* Validate that width*depth will not exceed the TCAM limit */
	if ((DIVIDE_AND_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) *
	     (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES)
		return ICE_ERR_MAX_LIMIT;

	ice_memset(&tbl_alloc, 0, sizeof(tbl_alloc), ICE_NONDMA_MEM);
	tbl_alloc.width = width;
	tbl_alloc.depth = depth;
	tbl_alloc.act_pairs_per_entry = params->entry_act_pairs;
	tbl_alloc.concurr = params->concurr;
	/* Set dependent_alloc_id only for concurrent table type */
	if (params->concurr) {
		tbl_alloc.num_dependent_alloc_ids =
			ICE_AQC_MAX_CONCURRENT_ACL_TBL;

		for (i = 0; i < ICE_AQC_MAX_CONCURRENT_ACL_TBL; i++)
			tbl_alloc.buf.data_buf.alloc_ids[i] =
				CPU_TO_LE16(params->dep_tbls[i]);
	}

	/* call the AQ command to create the ACL table with these values */
	status = ice_aq_alloc_acl_tbl(hw, &tbl_alloc, NULL);
	if (status) {
		if (LE16_TO_CPU(tbl_alloc.buf.resp_buf.alloc_id) <
		    ICE_AQC_ALLOC_ID_LESS_THAN_4K)
			ice_debug(hw, ICE_DBG_ACL, "Alloc ACL table failed. Unavailable resource.\n");
		else
			ice_debug(hw, ICE_DBG_ACL, "AQ allocation of ACL failed with error. status: %d\n",
				  status);
		return status;
	}

	tbl = (struct ice_acl_tbl *)ice_malloc(hw, sizeof(*tbl));
	if (!tbl) {
		/* NOTE(review): firmware table allocation succeeded above but
		 * is not rolled back on this path — confirm intended.
		 */
		status = ICE_ERR_NO_MEMORY;

		goto out;
	}

	resp_buf = &tbl_alloc.buf.resp_buf;

	/* Retrieve information of the allocated table */
	tbl->id = LE16_TO_CPU(resp_buf->alloc_id);
	tbl->first_tcam = resp_buf->ops.table.first_tcam;
	tbl->last_tcam = resp_buf->ops.table.last_tcam;
	tbl->first_entry = LE16_TO_CPU(resp_buf->first_entry);
	tbl->last_entry = LE16_TO_CPU(resp_buf->last_entry);

	tbl->info = *params;
	tbl->info.width = width;
	tbl->info.depth = depth;
	hw->acl_tbl = tbl;

	for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
		tbl->act_mems[i].act_mem = resp_buf->act_mem[i];

	/* Figure out which TCAMs that these newly allocated action memories
	 * belong to.
	 */
	ice_acl_divide_act_mems_to_tcams(tbl);

	/* Initialize the resources allocated by invalidating all TCAM entries
	 * and all the action pairs
	 */
	status = ice_acl_init_tbl(hw);
	if (status) {
		ice_free(hw, tbl);
		hw->acl_tbl = NULL;
		ice_debug(hw, ICE_DBG_ACL, "Initialization of TCAM entries failed. status: %d\n",
			  status);
		goto out;
	}

	/* Each bit in 'avail' tracks one 64-entry allocation unit; compute
	 * the first and last unit indices spanned by this table.
	 */
	first_e = (tbl->first_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
		(tbl->first_entry / ICE_ACL_ENTRY_ALLOC_UNIT);
	last_e = (tbl->last_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
		(tbl->last_entry / ICE_ACL_ENTRY_ALLOC_UNIT);

	/* Indicate available entries in the table */
	ice_bitmap_set(tbl->avail, first_e, last_e - first_e + 1);

	INIT_LIST_HEAD(&tbl->scens);
out:

	return status;
}
421 
/**
 * ice_acl_alloc_partition - Allocate a partition from the ACL table
 * @hw: pointer to the hardware structure
 * @req: info of partition being allocated; on success req->start, req->end
 *	 and req->num_entry are filled in with the chosen partition
 */
static enum ice_status
ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req)
{
	u16 start = 0, cnt = 0, off = 0;
	u16 width, r_entries, row;
	bool done = false;
	int dir;

	/* Determine the number of TCAMs each entry overlaps */
	width = DIVIDE_AND_ROUND_UP(req->width, ICE_AQC_ACL_KEY_WIDTH_BYTES);

	/* Check if we have enough TCAMs to accommodate the width */
	if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1)
		return ICE_ERR_MAX_LIMIT;

	/* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */
	r_entries = ICE_ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT);

	/* To look for an available partition that can accommodate the request,
	 * the process first logically arranges available TCAMs in rows such
	 * that each row produces entries with the requested width. It then
	 * scans the TCAMs' available bitmap, one bit at a time, and
	 * accumulates contiguous available 64-entry chunks until there are
	 * enough of them or when all TCAM configurations have been checked.
	 *
	 * For width of 1 TCAM, the scanning process starts from the top most
	 * TCAM, and goes downward. Available bitmaps are examined from LSB
	 * to MSB.
	 *
	 * For width of multiple TCAMs, the process starts from the bottom-most
	 * row of TCAMs, and goes upward. Available bitmaps are examined from
	 * the MSB to the LSB.
	 *
	 * To make sure that adjacent TCAMs can be logically arranged in the
	 * same row, the scanning process may have multiple passes. In each
	 * pass, the first TCAM of the bottom-most row is displaced by one
	 * additional TCAM. The width of the row and the number of the TCAMs
	 * available determine the number of passes. When the displacement is
	 * more than the size of width, the TCAM row configurations will
	 * repeat. The process will terminate when the configurations repeat.
	 *
	 * Available partitions can span more than one row of TCAMs.
	 */
	if (width == 1) {
		row = hw->acl_tbl->first_tcam;
		dir = 1;
	} else {
		/* Start with the bottom-most row, and scan for available
		 * entries upward
		 */
		row = hw->acl_tbl->last_tcam + 1 - width;
		dir = -1;
	}

	do {
		u16 i;

		/* Scan all 64-entry chunks, one chunk at a time, in the
		 * current TCAM row
		 */
		for (i = 0;
		     i < ICE_AQC_MAX_TCAM_ALLOC_UNITS && cnt < r_entries;
		     i++) {
			bool avail = true;
			u16 w, p;

			/* Compute the cumulative available mask across the
			 * TCAM row to determine if the current 64-entry chunk
			 * is available.
			 */
			p = dir > 0 ? i : ICE_AQC_MAX_TCAM_ALLOC_UNITS - i - 1;
			for (w = row; w < row + width && avail; w++) {
				u16 b;

				/* Bit index of chunk 'p' of TCAM 'w' in the
				 * global availability bitmap
				 */
				b = (w * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + p;
				avail &= ice_is_bit_set(hw->acl_tbl->avail, b);
			}

			if (!avail) {
				/* Contiguity broken; restart accumulation */
				cnt = 0;
			} else {
				/* Compute the starting index of the newly
				 * found partition. When 'dir' is negative, the
				 * scan processes is going upward. If so, the
				 * starting index needs to be updated for every
				 * available 64-entry chunk found.
				 */
				if (!cnt || dir < 0)
					start = (row * ICE_AQC_ACL_TCAM_DEPTH) +
						(p * ICE_ACL_ENTRY_ALLOC_UNIT);
				cnt += ICE_ACL_ENTRY_ALLOC_UNIT;
			}
		}

		if (cnt >= r_entries) {
			/* Found enough contiguous chunks; record the result */
			req->start = start;
			req->num_entry = r_entries;
			req->end = ice_acl_tbl_calc_end_idx(start, r_entries,
							    width);
			break;
		}

		row = dir > 0 ? row + width : row - width;
		if (row > hw->acl_tbl->last_tcam ||
		    row < hw->acl_tbl->first_tcam) {
			/* All rows have been checked. Increment 'off' that
			 * will help yield a different TCAM configuration in
			 * which adjacent TCAMs can be alternatively in the
			 * same row.
			 */
			off++;

			/* However, if the new 'off' value yields previously
			 * checked configurations, then exit.
			 */
			if (off >= width)
				done = true;
			else
				row = dir > 0 ? off :
					hw->acl_tbl->last_tcam + 1 - off -
					width;
		}
	} while (!done);

	return cnt >= r_entries ? ICE_SUCCESS : ICE_ERR_MAX_LIMIT;
}
553 
/**
 * ice_acl_fill_tcam_select
 * @scen_buf: Pointer to the scenario buffer that needs to be populated
 * @scen: Pointer to the available space for the scenario
 * @tcam_idx: Index of the TCAM used for this scenario
 * @tcam_idx_in_cascade : Local index of the TCAM in the cascade scenario
 *
 * For all TCAM that participate in this scenario, fill out the tcam_select
 * value.
 */
static void
ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
			 struct ice_acl_scen *scen, u16 tcam_idx,
			 u16 tcam_idx_in_cascade)
{
	u16 cascade_cnt, idx;
	u8 j;

	/* 'idx' is this TCAM's byte offset into the scenario-wide key */
	idx = tcam_idx_in_cascade * ICE_AQC_ACL_KEY_WIDTH_BYTES;
	cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
					  ICE_AQC_ACL_KEY_WIDTH_BYTES);

	/* For each scenario, we reserved last three bytes of scenario width for
	 * profile ID, range checker, and packet direction. Thus, the last three
	 * bytes of the last cascaded TCAMs will have value of 1st, 31st and
	 * 32nd byte location of BYTE selection base.
	 *
	 * For other bytes in the TCAMs:
	 * For non-cascade mode (1 TCAM wide) scenario, TCAM[x]'s Select {0-1}
	 * select indices 0-1 of the Byte Selection Base
	 * For cascade mode, the leftmost TCAM of the first cascade row selects
	 * indices 0-4 of the Byte Selection Base; the second TCAM in the
	 * cascade row selects indices starting with 5-n
	 */
	for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
		/* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
		u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx;

		/* Only the last cascaded TCAM carries the three reserved
		 * bytes (range checker, profile ID, packet direction).
		 */
		if (tcam_idx_in_cascade == cascade_cnt - 1) {
			if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
				val = ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK;
			else if (j == ICE_ACL_SCEN_PID_IDX_IN_TCAM)
				val = ICE_AQC_ACL_BYTE_SEL_BASE_PID;
			else if (j == ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM)
				val = ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR;
		}

		/* In case that scenario's width is greater than the width of
		 * the Byte selection base, we will not assign a value to the
		 * tcam_select[j]. As a result, the tcam_select[j] will have
		 * default value which is zero.
		 */
		if (val > ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK)
			continue;

		scen_buf->tcam_cfg[tcam_idx].tcam_select[j] = val;

		idx++;
	}
}
614 
615 /**
616  * ice_acl_set_scen_chnk_msk
617  * @scen_buf: Pointer to the scenario buffer that needs to be populated
618  * @scen: pointer to the available space for the scenario
619  *
620  * Set the chunk mask for the entries that will be used by this scenario
621  */
622 static void
ice_acl_set_scen_chnk_msk(struct ice_aqc_acl_scen * scen_buf,struct ice_acl_scen * scen)623 ice_acl_set_scen_chnk_msk(struct ice_aqc_acl_scen *scen_buf,
624 			  struct ice_acl_scen *scen)
625 {
626 	u16 tcam_idx, num_cscd, units, cnt;
627 	u8 chnk_offst;
628 
629 	/* Determine the starting TCAM index and offset of the start entry */
630 	tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
631 	chnk_offst = (u8)((scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
632 			  ICE_ACL_ENTRY_ALLOC_UNIT);
633 
634 	/* Entries are allocated and tracked in multiple of 64's */
635 	units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
636 
637 	/* Determine number of cascaded TCAMs */
638 	num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
639 
640 	for (cnt = 0; cnt < units; cnt++) {
641 		u16 i;
642 
643 		/* Set the corresponding bitmap of individual 64-entry
644 		 * chunk spans across a cascade of 1 or more TCAMs
645 		 * For each TCAM, there will be (ICE_AQC_ACL_TCAM_DEPTH
646 		 * / ICE_ACL_ENTRY_ALLOC_UNIT) or 8 chunks.
647 		 */
648 		for (i = tcam_idx; i < tcam_idx + num_cscd; i++)
649 			scen_buf->tcam_cfg[i].chnk_msk |= BIT(chnk_offst);
650 
651 		chnk_offst = (chnk_offst + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
652 		if (!chnk_offst)
653 			tcam_idx += num_cscd;
654 	}
655 }
656 
657 /**
658  * ice_acl_assign_act_mem_for_scen
659  * @tbl: pointer to ACL table structure
660  * @scen: pointer to the scenario struct
661  * @scen_buf: pointer to the available space for the scenario
662  * @current_tcam_idx: theoretical index of the TCAM that we associated those
663  *		      action memory banks with, at the table creation time.
664  * @target_tcam_idx: index of the TCAM that we want to associate those action
665  *		     memory banks with.
666  */
667 static void
ice_acl_assign_act_mem_for_scen(struct ice_acl_tbl * tbl,struct ice_acl_scen * scen,struct ice_aqc_acl_scen * scen_buf,u8 current_tcam_idx,u8 target_tcam_idx)668 ice_acl_assign_act_mem_for_scen(struct ice_acl_tbl *tbl,
669 				struct ice_acl_scen *scen,
670 				struct ice_aqc_acl_scen *scen_buf,
671 				u8 current_tcam_idx, u8 target_tcam_idx)
672 {
673 	u8 i;
674 
675 	for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) {
676 		struct ice_acl_act_mem *p_mem = &tbl->act_mems[i];
677 
678 		if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL ||
679 		    p_mem->member_of_tcam != current_tcam_idx)
680 			continue;
681 
682 		scen_buf->act_mem_cfg[i] = target_tcam_idx;
683 		scen_buf->act_mem_cfg[i] |= ICE_AQC_ACL_SCE_ACT_MEM_EN;
684 		ice_set_bit(i, scen->act_mem_bitmap);
685 	}
686 }
687 
688 /**
689  * ice_acl_commit_partition - Indicate if the specified partition is active
690  * @hw: pointer to the hardware structure
691  * @scen: pointer to the scenario struct
692  * @commit: true if the partition is being commit
693  */
694 static void
ice_acl_commit_partition(struct ice_hw * hw,struct ice_acl_scen * scen,bool commit)695 ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen,
696 			 bool commit)
697 {
698 	u16 tcam_idx, off, num_cscd, units, cnt;
699 
700 	/* Determine the starting TCAM index and offset of the start entry */
701 	tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start);
702 	off = (scen->start % ICE_AQC_ACL_TCAM_DEPTH) /
703 		ICE_ACL_ENTRY_ALLOC_UNIT;
704 
705 	/* Entries are allocated and tracked in multiple of 64's */
706 	units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT;
707 
708 	/* Determine number of cascaded TCAM */
709 	num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
710 
711 	for (cnt = 0; cnt < units; cnt++) {
712 		u16 w;
713 
714 		/* Set/clear the corresponding bitmap of individual 64-entry
715 		 * chunk spans across a row of 1 or more TCAMs
716 		 */
717 		for (w = 0; w < num_cscd; w++) {
718 			u16 b;
719 
720 			b = ((tcam_idx + w) * ICE_AQC_MAX_TCAM_ALLOC_UNITS) +
721 				off;
722 			if (commit)
723 				ice_set_bit(b, hw->acl_tbl->avail);
724 			else
725 				ice_clear_bit(b, hw->acl_tbl->avail);
726 		}
727 
728 		off = (off + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS;
729 		if (!off)
730 			tcam_idx += num_cscd;
731 	}
732 }
733 
/**
 * ice_acl_create_scen
 * @hw: pointer to the hardware structure
 * @match_width: number of bytes to be matched in this scenario
 * @num_entries: number of entries to be allocated for the scenario
 * @scen_id: holds returned scenario ID if successful
 */
enum ice_status
ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
		    u16 *scen_id)
{
	u8 cascade_cnt, first_tcam, last_tcam, i, k;
	struct ice_aqc_acl_scen scen_buf;
	struct ice_acl_scen *scen;
	enum ice_status status;

	if (!hw->acl_tbl)
		return ICE_ERR_DOES_NOT_EXIST;

	scen = (struct ice_acl_scen *)ice_malloc(hw, sizeof(*scen));
	if (!scen)
		return ICE_ERR_NO_MEMORY;

	/* Round the requested width up to a whole number of TCAM widths */
	scen->start = hw->acl_tbl->first_entry;
	scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES *
		DIVIDE_AND_ROUND_UP(match_width, ICE_AQC_ACL_KEY_WIDTH_BYTES);
	scen->num_entry = num_entries;

	/* Find TCAM space for this scenario; fills scen->start/end */
	status = ice_acl_alloc_partition(hw, scen);
	if (status)
		goto out;

	ice_memset(&scen_buf, 0, sizeof(scen_buf), ICE_NONDMA_MEM);

	/* Determine the number of cascade TCAMs, given the scenario's width */
	cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width,
					  ICE_AQC_ACL_KEY_WIDTH_BYTES);
	first_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
	last_tcam = ICE_ACL_TBL_TCAM_IDX(scen->end);

	/* For each scenario, we reserved last three bytes of scenario width for
	 * packet direction flag, profile ID and range checker. Thus, we want to
	 * return back to the caller the eff_width, pkt_dir_idx, rng_chk_idx and
	 * pid_idx.
	 */
	scen->eff_width = cascade_cnt * ICE_AQC_ACL_KEY_WIDTH_BYTES -
		ICE_ACL_SCEN_MIN_WIDTH;
	scen->rng_chk_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
		ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM;
	scen->pid_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
		ICE_ACL_SCEN_PID_IDX_IN_TCAM;
	scen->pkt_dir_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES +
		ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM;

	/* set the chunk mask for the tcams */
	ice_acl_set_scen_chnk_msk(&scen_buf, scen);

	/* set the TCAM select and start_cmp and start_set bits */
	k = first_tcam;
	/* set the START_SET bit at the beginning of the stack */
	scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
	/* Walk the scenario's TCAMs one cascade row at a time */
	while (k <= last_tcam) {
		u8 last_tcam_idx_cascade = cascade_cnt + k - 1;

		/* set start_cmp for the first cascaded TCAM */
		scen_buf.tcam_cfg[k].start_cmp_set |=
			ICE_AQC_ACL_ALLOC_SCE_START_CMP;

		/* cascade TCAMs up to the width of the scenario */
		for (i = k; i < cascade_cnt + k; i++) {
			ice_acl_fill_tcam_select(&scen_buf, scen, i, i - k);
			/* All action memories of a cascade row must be
			 * associated with the row's last TCAM.
			 */
			ice_acl_assign_act_mem_for_scen(hw->acl_tbl, scen,
							&scen_buf,
							i,
							last_tcam_idx_cascade);
		}

		k = i;
	}

	/* We need to set the start_cmp bit for the unused TCAMs. */
	i = 0;
	while (i < first_tcam)
		scen_buf.tcam_cfg[i++].start_cmp_set =
					ICE_AQC_ACL_ALLOC_SCE_START_CMP;

	i = last_tcam + 1;
	while (i < ICE_AQC_ACL_SLICES)
		scen_buf.tcam_cfg[i++].start_cmp_set =
					ICE_AQC_ACL_ALLOC_SCE_START_CMP;

	status = ice_aq_alloc_acl_scen(hw, scen_id, &scen_buf, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_ACL, "AQ allocation of ACL scenario failed. status: %d\n",
			  status);
		goto out;
	}

	scen->id = *scen_id;
	/* Mark the partition's chunks as in use (commit == false clears the
	 * corresponding 'avail' bits).
	 */
	ice_acl_commit_partition(hw, scen, false);
	ice_acl_init_entry(scen);
	LIST_ADD(&scen->list_entry, &hw->acl_tbl->scens);

out:
	/* On any failure the scenario struct is not published; free it */
	if (status)
		ice_free(hw, scen);

	return status;
}
843 
/**
 * ice_acl_destroy_scen - Destroy an ACL scenario
 * @hw: pointer to the HW struct
 * @scen_id: ID of the remove scenario
 */
static enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id)
{
	struct ice_acl_scen *scen, *tmp_scen;
	struct ice_flow_prof *p, *tmp;
	enum ice_status status;

	if (!hw->acl_tbl)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Remove profiles that use "scen_id" scenario */
	LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[ICE_BLK_ACL],
				 ice_flow_prof, l_entry)
		if (p->cfg.scen && p->cfg.scen->id == scen_id) {
			status = ice_flow_rem_prof(hw, ICE_BLK_ACL, p->id);
			if (status) {
				ice_debug(hw, ICE_DBG_ACL, "ice_flow_rem_prof failed. status: %d\n",
					  status);
				return status;
			}
		}

	/* Call the AQ command to destroy the targeted scenario */
	status = ice_aq_dealloc_acl_scen(hw, scen_id, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_ACL, "AQ de-allocation of scenario failed. status: %d\n",
			  status);
		return status;
	}

	/* Remove scenario from hw->acl_tbl->scens */
	LIST_FOR_EACH_ENTRY_SAFE(scen, tmp_scen, &hw->acl_tbl->scens,
				 ice_acl_scen, list_entry)
		if (scen->id == scen_id) {
			LIST_DEL(&scen->list_entry);
			ice_free(hw, scen);
		}

	return ICE_SUCCESS;
}
888 
/**
 * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL
 * @hw: pointer to the HW struct
 *
 * Quiesces and deletes every scenario still attached to the table, then
 * de-allocates the ACL table itself and clears hw->acl_tbl.
 */
enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw)
{
	struct ice_acl_scen *pos_scen, *tmp_scen;
	struct ice_aqc_acl_generic resp_buf;
	struct ice_aqc_acl_scen buf;
	enum ice_status status;
	u8 i;

	if (!hw->acl_tbl)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Mark all the created scenario's TCAM to stop the packet lookup and
	 * delete them afterward
	 */
	LIST_FOR_EACH_ENTRY_SAFE(pos_scen, tmp_scen, &hw->acl_tbl->scens,
				 ice_acl_scen, list_entry) {
		/* Read back the scenario config so only the lookup-related
		 * fields below are modified.
		 */
		status = ice_aq_query_acl_scen(hw, pos_scen->id, &buf, NULL);
		if (status) {
			ice_debug(hw, ICE_DBG_ACL, "ice_aq_query_acl_scen() failed. status: %d\n",
				  status);
			return status;
		}

		/* Clear chunk masks and reset start_cmp_set to stop packet
		 * lookups on every slice of this scenario.
		 */
		for (i = 0; i < ICE_AQC_ACL_SLICES; i++) {
			buf.tcam_cfg[i].chnk_msk = 0;
			buf.tcam_cfg[i].start_cmp_set =
					ICE_AQC_ACL_ALLOC_SCE_START_CMP;
		}

		/* Disable all action memory banks for the scenario */
		for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++)
			buf.act_mem_cfg[i] = 0;

		status = ice_aq_update_acl_scen(hw, pos_scen->id, &buf, NULL);
		if (status) {
			ice_debug(hw, ICE_DBG_ACL, "ice_aq_update_acl_scen() failed. status: %d\n",
				  status);
			return status;
		}

		status = ice_acl_destroy_scen(hw, pos_scen->id);
		if (status) {
			ice_debug(hw, ICE_DBG_ACL, "deletion of scenario failed. status: %d\n",
				  status);
			return status;
		}
	}

	/* call the AQ command to destroy the ACL table */
	status = ice_aq_dealloc_acl_tbl(hw, hw->acl_tbl->id, &resp_buf, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_ACL, "AQ de-allocation of ACL failed. status: %d\n",
			  status);
		return status;
	}

	ice_free(hw, hw->acl_tbl);
	hw->acl_tbl = NULL;

	return ICE_SUCCESS;
}
953 
954 /**
955  * ice_acl_add_entry - Add a flow entry to an ACL scenario
956  * @hw: pointer to the HW struct
957  * @scen: scenario to add the entry to
958  * @prio: priority level of the entry being added
959  * @keys: buffer of the value of the key to be programmed to the ACL entry
960  * @inverts: buffer of the value of the key inverts to be programmed
961  * @acts: pointer to a buffer containing formatted actions
962  * @acts_cnt: indicates the number of actions stored in "acts"
963  * @entry_idx: returned scenario relative index of the added flow entry
964  *
965  * Given an ACL table and a scenario, to add the specified key and key invert
966  * to an available entry in the specified scenario.
967  * The "keys" and "inverts" buffers must be of the size which is the same as
968  * the scenario's width
969  */
970 enum ice_status
ice_acl_add_entry(struct ice_hw * hw,struct ice_acl_scen * scen,enum ice_acl_entry_prio prio,u8 * keys,u8 * inverts,struct ice_acl_act_entry * acts,u8 acts_cnt,u16 * entry_idx)971 ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
972 		  enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts,
973 		  struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
974 {
975 	u8 i, entry_tcam, num_cscd, offset;
976 	struct ice_aqc_acl_data buf;
977 	enum ice_status status = ICE_SUCCESS;
978 	u16 idx;
979 
980 	if (!scen)
981 		return ICE_ERR_DOES_NOT_EXIST;
982 
983 	*entry_idx = ice_acl_scen_assign_entry_idx(scen, prio);
984 	if (*entry_idx >= scen->num_entry) {
985 		*entry_idx = 0;
986 		return ICE_ERR_MAX_LIMIT;
987 	}
988 
989 	/* Determine number of cascaded TCAMs */
990 	num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
991 				       ICE_AQC_ACL_KEY_WIDTH_BYTES);
992 
993 	entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
994 	idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + *entry_idx);
995 
996 	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
997 	for (i = 0; i < num_cscd; i++) {
998 		/* If the key spans more than one TCAM in the case of cascaded
999 		 * TCAMs, the key and key inverts need to be properly split
1000 		 * among TCAMs.E.g.bytes 0 - 4 go to an index in the first TCAM
1001 		 * and bytes 5 - 9 go to the same index in the next TCAM, etc.
1002 		 * If the entry spans more than one TCAM in a cascaded TCAM
1003 		 * mode, the programming of the entries in the TCAMs must be in
1004 		 * reversed order - the TCAM entry of the rightmost TCAM should
1005 		 * be programmed first; the TCAM entry of the leftmost TCAM
1006 		 * should be programmed last.
1007 		 */
1008 		offset = num_cscd - i - 1;
1009 		ice_memcpy(&buf.entry_key.val,
1010 			   &keys[offset * sizeof(buf.entry_key.val)],
1011 			   sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA);
1012 		ice_memcpy(&buf.entry_key_invert.val,
1013 			   &inverts[offset * sizeof(buf.entry_key_invert.val)],
1014 			   sizeof(buf.entry_key_invert.val),
1015 			   ICE_NONDMA_TO_NONDMA);
1016 		status = ice_aq_program_acl_entry(hw, entry_tcam + offset, idx,
1017 						  &buf, NULL);
1018 		if (status) {
1019 			ice_debug(hw, ICE_DBG_ACL, "aq program acl entry failed status: %d\n",
1020 				  status);
1021 			goto out;
1022 		}
1023 	}
1024 
1025 	/* Program the action memory */
1026 	status = ice_acl_prog_act(hw, scen, acts, acts_cnt, *entry_idx);
1027 
1028 out:
1029 	if (status) {
1030 		ice_acl_rem_entry(hw, scen, *entry_idx);
1031 		*entry_idx = 0;
1032 	}
1033 
1034 	return status;
1035 }
1036 
1037 /**
1038  * ice_acl_prog_act - Program a scenario's action memory
1039  * @hw: pointer to the HW struct
1040  * @scen: scenario to add the entry to
1041  * @acts: pointer to a buffer containing formatted actions
1042  * @acts_cnt: indicates the number of actions stored in "acts"
1043  * @entry_idx: scenario relative index of the added flow entry
1044  *
1045  * Program a scenario's action memory
1046  */
1047 enum ice_status
ice_acl_prog_act(struct ice_hw * hw,struct ice_acl_scen * scen,struct ice_acl_act_entry * acts,u8 acts_cnt,u16 entry_idx)1048 ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
1049 		 struct ice_acl_act_entry *acts, u8 acts_cnt,
1050 		 u16 entry_idx)
1051 {
1052 	u8 entry_tcam, num_cscd, i, actx_idx = 0;
1053 	struct ice_aqc_actpair act_buf;
1054 	enum ice_status status = ICE_SUCCESS;
1055 	u16 idx;
1056 
1057 	if (entry_idx >= scen->num_entry)
1058 		return ICE_ERR_MAX_LIMIT;
1059 
1060 	ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
1061 
1062 	/* Determine number of cascaded TCAMs */
1063 	num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
1064 				       ICE_AQC_ACL_KEY_WIDTH_BYTES);
1065 
1066 	entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
1067 	idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
1068 
1069 	ice_for_each_set_bit(i, scen->act_mem_bitmap,
1070 			     ICE_AQC_MAX_ACTION_MEMORIES) {
1071 		struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
1072 
1073 		if (actx_idx >= acts_cnt)
1074 			break;
1075 		if (mem->member_of_tcam >= entry_tcam &&
1076 		    mem->member_of_tcam < entry_tcam + num_cscd) {
1077 			ice_memcpy(&act_buf.act[0], &acts[actx_idx],
1078 				   sizeof(struct ice_acl_act_entry),
1079 				   ICE_NONDMA_TO_NONDMA);
1080 
1081 			if (++actx_idx < acts_cnt) {
1082 				ice_memcpy(&act_buf.act[1], &acts[actx_idx],
1083 					   sizeof(struct ice_acl_act_entry),
1084 					   ICE_NONDMA_TO_NONDMA);
1085 			}
1086 
1087 			status = ice_aq_program_actpair(hw, i, idx, &act_buf,
1088 							NULL);
1089 			if (status) {
1090 				ice_debug(hw, ICE_DBG_ACL, "program actpair failed status: %d\n",
1091 					  status);
1092 				break;
1093 			}
1094 			actx_idx++;
1095 		}
1096 	}
1097 
1098 	if (!status && actx_idx < acts_cnt)
1099 		status = ICE_ERR_MAX_LIMIT;
1100 
1101 	return status;
1102 }
1103 
1104 /**
1105  * ice_acl_rem_entry - Remove a flow entry from an ACL scenario
1106  * @hw: pointer to the HW struct
1107  * @scen: scenario to remove the entry from
1108  * @entry_idx: the scenario-relative index of the flow entry being removed
1109  */
1110 enum ice_status
ice_acl_rem_entry(struct ice_hw * hw,struct ice_acl_scen * scen,u16 entry_idx)1111 ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
1112 {
1113 	struct ice_aqc_actpair act_buf;
1114 	struct ice_aqc_acl_data buf;
1115 	u8 entry_tcam, num_cscd, i;
1116 	enum ice_status status = ICE_SUCCESS;
1117 	u16 idx;
1118 
1119 	if (!scen)
1120 		return ICE_ERR_DOES_NOT_EXIST;
1121 
1122 	if (entry_idx >= scen->num_entry)
1123 		return ICE_ERR_MAX_LIMIT;
1124 
1125 	if (!ice_is_bit_set(scen->entry_bitmap, entry_idx))
1126 		return ICE_ERR_DOES_NOT_EXIST;
1127 
1128 	/* Determine number of cascaded TCAMs */
1129 	num_cscd = DIVIDE_AND_ROUND_UP(scen->width,
1130 				       ICE_AQC_ACL_KEY_WIDTH_BYTES);
1131 
1132 	entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
1133 	idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
1134 
1135 	/* invalidate the flow entry */
1136 	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1137 	for (i = 0; i < num_cscd; i++) {
1138 		status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf,
1139 						  NULL);
1140 		if (status)
1141 			ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n",
1142 				  status);
1143 	}
1144 
1145 	ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
1146 
1147 	ice_for_each_set_bit(i, scen->act_mem_bitmap,
1148 			     ICE_AQC_MAX_ACTION_MEMORIES) {
1149 		struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
1150 
1151 		if (mem->member_of_tcam >= entry_tcam &&
1152 		    mem->member_of_tcam < entry_tcam + num_cscd) {
1153 			/* Invalidate allocated action pairs */
1154 			status = ice_aq_program_actpair(hw, i, idx, &act_buf,
1155 							NULL);
1156 			if (status)
1157 				ice_debug(hw, ICE_DBG_ACL, "program actpair failed status: %d\n",
1158 					  status);
1159 		}
1160 	}
1161 
1162 	ice_acl_scen_free_entry_idx(scen, entry_idx);
1163 
1164 	return status;
1165 }
1166