/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include <rte_bitmap.h>
#include "base/ice_type.h"
#include "base/ice_acl.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "base/ice_flow.h"

#define MAX_ACL_SLOTS_ID 2048

#define ICE_ACL_INSET_ETH_IPV4 ( \
	ICE_INSET_SMAC | ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

static struct ice_flow_parser ice_acl_parser;

struct acl_rule {
	enum ice_fltr_ptype flow_type;
	uint64_t entry_id[4];
};

static struct
ice_pattern_match_item ice_acl_pattern[] = {
	{pattern_eth_ipv4,	ICE_ACL_INSET_ETH_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_ACL_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_ACL_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_ACL_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,	ICE_INSET_NONE},
};

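/**
 * ice_acl_prof_alloc - Reserve memory for the ACL profiles
 * @hw: pointer to the hardware structure
 *
 * Allocates one ice_fd_hw_prof per filter ptype. On failure, everything
 * allocated so far is freed and -ENOMEM is returned.
 */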
static int
ice_acl_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->acl_prof) {
		hw->acl_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->acl_prof));
		if (!hw->acl_prof)
			return -ENOMEM;
	}

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
		if (!hw->acl_prof[ptype]) {
			hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->acl_prof));
			if (!hw->acl_prof[ptype])
				goto fail_mem;
		}
	}

	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype; fltr_ptype++) {
		rte_free(hw->acl_prof[fltr_ptype]);
		hw->acl_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->acl_prof);
	hw->acl_prof = NULL;

	return -ENOMEM;
}

/**
 * ice_acl_setup - Reserve and initialize the ACL resources
 * @pf: board private structure
 */
static int
ice_acl_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	uint32_t pf_num = hw->dev_caps.num_funcs;
	struct ice_acl_tbl_params params;
	uint16_t scen_id;
	int err = 0;

	memset(&params, 0, sizeof(params));

	/* Create the IPv4 ACL table. When fewer than 4 PFs share the
	 * device, a wider key is available, leaving room for the MAC
	 * fields added in acl_add_prof_prepare().
	 */
	if (pf_num < 4)
		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
	else
		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;

	params.depth = ICE_AQC_ACL_TCAM_DEPTH;
	params.entry_act_pairs = 1;
	params.concurr = false;

	err = ice_acl_create_tbl(hw, &params);
	if (err)
		return err;

	err = ice_acl_create_scen(hw, params.width, params.depth,
				  &scen_id);
	if (err)
		return err;

	return 0;
}

/**
 * ice_deinit_acl - Unroll the initialization of the ACL block
 * @pf: ptr to PF device
 */
static void ice_deinit_acl(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	ice_acl_destroy_tbl(hw);

	rte_free(hw->acl_tbl);
	hw->acl_tbl = NULL;

	if (pf->acl.slots) {
		rte_free(pf->acl.slots);
		pf->acl.slots = NULL;
	}
}

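/**
 * acl_add_prof_prepare - set the extraction fields for one ACL profile
 * @hw: pointer to the hardware structure
 * @seg: flow segment to fill in
 * @is_l4: true when L4 port fields should be added
 * @src_port: flow field index for the L4 source port
 * @dst_port: flow field index for the L4 destination port
 *
 * MAC fields are only included when fewer than 4 PFs share the device,
 * matching the wider TCAM key allocated in ice_acl_setup().
 */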
static void
acl_add_prof_prepare(struct ice_hw *hw, struct ice_flow_seg_info *seg,
		     bool is_l4, uint16_t src_port, uint16_t dst_port)
{
	uint16_t val_loc, mask_loc;

	if (hw->dev_caps.num_funcs < 4) {
		/* mac source address */
		val_loc = offsetof(struct ice_fdir_fltr,
				   ext_data.src_mac);
		mask_loc = offsetof(struct ice_fdir_fltr,
				    ext_mask.src_mac);
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
				 val_loc, mask_loc,
				 ICE_FLOW_FLD_OFF_INVAL, false);

		/* mac destination address */
		val_loc = offsetof(struct ice_fdir_fltr,
				   ext_data.dst_mac);
		mask_loc = offsetof(struct ice_fdir_fltr,
				    ext_mask.dst_mac);
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
				 val_loc, mask_loc,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	/* IP source address */
	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

	if (is_l4) {
		/* Layer 4 source port */
		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
		ice_flow_set_fld(seg, src_port, val_loc,
				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

		/* Layer 4 destination port */
		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
		ice_flow_set_fld(seg, dst_port, val_loc,
				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
	}
}

/**
 * ice_acl_prof_init - Initialize ACL profile
 * @pf: ice PF structure
 *
 * Returns 0 on success.
 */
static int
ice_acl_prof_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_prof *prof_ipv4 = NULL;
	struct ice_flow_prof *prof_ipv4_udp = NULL;
	struct ice_flow_prof *prof_ipv4_tcp = NULL;
	struct ice_flow_prof *prof_ipv4_sctp = NULL;
	struct ice_flow_seg_info *seg;
	int i;
	int ret;

	seg = (struct ice_flow_seg_info *)
		 ice_malloc(hw, sizeof(*seg));
	if (!seg)
		return -ENOMEM;

	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, false, 0, 0);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
				seg, 1, NULL, 0, &prof_ipv4);
	if (ret)
		goto err_add_prof;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_UDP,
				seg, 1, NULL, 0, &prof_ipv4_udp);
	if (ret)
		goto err_add_prof_ipv4_udp;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_TCP,
				seg, 1, NULL, 0, &prof_ipv4_tcp);
	if (ret)
		goto err_add_prof_ipv4_tcp;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
				seg, 1, NULL, 0, &prof_ipv4_sctp);
	if (ret)
		goto err_add_prof_ipv4_sctp;

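	/* Bind all four profiles to every VSI handle up to the PF main
	 * VSI so ACL lookups take effect on those VSIs.
	 */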
	for (i = 0; i < pf->main_vsi->idx; i++) {
		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
		if (ret)
			goto err_assoc_prof;
	}
	return 0;

err_assoc_prof:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_SCTP);
err_add_prof_ipv4_sctp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
err_add_prof_ipv4_tcp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
err_add_prof_ipv4_udp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
err_add_prof:
	ice_free(hw, seg);
	return ret;
}

/**
 * ice_acl_set_input_set - Helper function to set the input set for ACL
 * @filter: pointer to the ACL filter configuration
 * @input: filter structure to fill in
 *
 * Return error value or 0 on success.
 */
static int
ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
{
	if (!input)
		return ICE_ERR_BAD_PTR;

	input->q_index = filter->input.q_index;
	input->dest_vsi = filter->input.dest_vsi;
	input->dest_ctl = filter->input.dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->flow_type = filter->input.flow_type;

	switch (input->flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
		input->ip.v4.src_port = filter->input.ip.v4.src_port;
		input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
		input->ip.v4.src_ip = filter->input.ip.v4.src_ip;

		input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
		input->mask.v4.src_port = filter->input.mask.v4.src_port;
		input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
		input->mask.v4.src_ip = filter->input.mask.v4.src_ip;

		ice_memcpy(&input->ext_data.src_mac,
			   &filter->input.ext_data.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_mask.src_mac,
			   &filter->input.ext_mask.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.dst_mac,
			   &filter->input.ext_data.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.dst_mac,
			   &filter->input.ext_mask.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
			   sizeof(struct ice_fdir_v4),
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->mask.v4, &filter->input.mask.v4,
			   sizeof(struct ice_fdir_v4),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.src_mac,
			   &filter->input.ext_data.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.src_mac,
			   &filter->input.ext_mask.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.dst_mac,
			   &filter->input.ext_data.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.dst_mac,
			   &filter->input.ext_mask.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

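/**
 * ice_acl_alloc_slot_id - find a free ACL slot in the bitmap
 * @slots: bitmap of free slot ids (a set bit means the slot is free)
 * @slot_id: output, the allocated slot id
 *
 * Scan the bitmap for the first set bit, clear it to mark the slot as
 * used, and return its position in @slot_id.
 */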
static inline int
ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
{
	uint32_t pos = 0;
	uint64_t slab = 0;
	uint32_t i = 0;

	__rte_bitmap_scan_init(slots);
	if (!rte_bitmap_scan(slots, &pos, &slab))
		return -rte_errno;

	i = rte_bsf64(slab);
	pos += i;
	rte_bitmap_clear(slots, pos);

	*slot_id = pos;
	return 0;
}

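/**
 * ice_acl_hw_set_conf - program one ACL entry into hardware
 * @pf: board private structure
 * @input: filter input configuration
 * @acts: flow actions to attach to the entry
 * @rule: software rule in which the entry id is recorded
 * @flow_type: filter ptype the entry is added under
 * @entry_idx: index into rule->entry_id[] for this entry
 */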
static inline int
ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
		    struct ice_flow_action *acts, struct acl_rule *rule,
		    enum ice_fltr_ptype flow_type, int32_t entry_idx)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_block blk = ICE_BLK_ACL;
	uint64_t entry_id, hw_entry;
	uint32_t slot_id = 0;
	int act_cnt = 1;
	int ret = 0;

	/* Allocate slot_id from the bitmap table. */
	ret = ice_acl_alloc_slot_id(pf->acl.slots, &slot_id);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to allocate slot id.");
		return ret;
	}

	/* For the IPV4_OTHER type, the caller adds an entry for every
	 * flow type; for the IPV4_UDP/TCP/SCTP types, only one entry
	 * is added.
	 */
	if (slot_id < MAX_ACL_NORMAL_ENTRIES) {
		/* Encode the flow type in the upper 32 bits and the
		 * bitmap slot in the lower 32 bits.
		 */
		entry_id = ((uint64_t)flow_type << 32) | slot_id;
		ret = ice_flow_add_entry(hw, blk, flow_type,
					 entry_id, pf->main_vsi->idx,
					 ICE_FLOW_PRIO_NORMAL, input,
					 acts, act_cnt, &hw_entry);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to add entry.");
			return ret;
		}
		rule->entry_id[entry_idx] = entry_id;
		pf->acl.hw_entry_id[slot_id] = hw_entry;
	} else {
		PMD_DRV_LOG(ERR, "Exceeded the maximum number of entries (%d) supported by HW!",
			    MAX_ACL_NORMAL_ENTRIES);
		return -1;
	}

	return 0;
}

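/* Look up the HW handle for @entry_id and remove the flow entry. */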
static inline void
ice_acl_del_entry(struct ice_hw *hw, uint64_t entry_id)
{
	uint64_t hw_entry;

	hw_entry = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
	ice_flow_rem_entry(hw, ICE_BLK_ACL, hw_entry);
}

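/* Return the bitmap slots and remove the HW entries for the first
 * @entry_idx entries recorded in @rule.
 */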
static inline void
ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
{
	uint32_t slot_id;
	int32_t i;
	uint64_t entry_id;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	for (i = 0; i < entry_idx; i++) {
		entry_id = rule->entry_id[i];
		slot_id = ICE_LO_DWORD(entry_id);
		rte_bitmap_set(pf->acl.slots, slot_id);
		ice_acl_del_entry(hw, entry_id);
	}
}

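/**
 * ice_acl_create_filter - create an ACL rule from parsed filter meta
 * @ad: ice adapter
 * @flow: rte_flow handle to attach the rule to
 * @meta: parsed ice_acl_conf from ice_acl_parse()
 * @error: rte_flow error reporting structure
 *
 * For IPV4_OTHER rules, the entry is mirrored into the UDP, TCP and
 * SCTP scenarios as well, so the rule matches regardless of the L4
 * protocol.
 */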
static int
ice_acl_create_filter(struct ice_adapter *ad,
		      struct rte_flow *flow,
		      void *meta,
		      struct rte_flow_error *error)
{
	struct ice_acl_conf *filter = meta;
	enum ice_fltr_ptype flow_type = filter->input.flow_type;
	struct ice_flow_action acts[1];
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_fltr *input;
	struct acl_rule *rule;
	int ret;

	rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for acl rule");
		return -rte_errno;
	}

	input = rte_zmalloc("acl_entry", sizeof(*input), 0);
	if (!input) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for acl input");
		ret = -rte_errno;
		goto err_acl_input_alloc;
	}

	ret = ice_acl_set_input_set(filter, input);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to set input set.");
		ret = -rte_errno;
		goto err_acl_set_input;
	}

	if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
		acts[0].type = ICE_FLOW_ACT_DROP;
		acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
		acts[0].data.acl_act.prio = 0x3;
		acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
	}

	input->acl_fltr = true;
	ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to set HW configuration.");
		ret = -rte_errno;
		goto err_acl_set_input;
	}

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
		if (ret)
			goto err_acl_hw_set_conf_udp;
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
		if (ret)
			goto err_acl_hw_set_conf_tcp;
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
		if (ret)
			goto err_acl_hw_set_conf_sctp;
	}

	rule->flow_type = flow_type;
	flow->rule = rule;
	return 0;

err_acl_hw_set_conf_sctp:
	ice_acl_hw_rem_conf(pf, rule, 3);
	goto err_acl_set_input;
err_acl_hw_set_conf_tcp:
	ice_acl_hw_rem_conf(pf, rule, 2);
	goto err_acl_set_input;
err_acl_hw_set_conf_udp:
	ice_acl_hw_rem_conf(pf, rule, 1);
err_acl_set_input:
	rte_free(input);
err_acl_input_alloc:
	rte_free(rule);
	return ret;
}

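/**
 * ice_acl_destroy_filter - remove an ACL rule and release its slots
 * @ad: ice adapter
 * @flow: rte_flow whose rule is being destroyed
 * @error: rte_flow error reporting structure
 */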
static int
ice_acl_destroy_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	struct acl_rule *rule = (struct acl_rule *)flow->rule;
	uint32_t slot_id, i;
	uint64_t entry_id;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret = 0;

	switch (rule->flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		for (i = 0; i < 4; i++) {
			entry_id = rule->entry_id[i];
			slot_id = ICE_LO_DWORD(entry_id);
			rte_bitmap_set(pf->acl.slots, slot_id);
			ice_acl_del_entry(hw, entry_id);
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		entry_id = rule->entry_id[0];
		slot_id = ICE_LO_DWORD(entry_id);
		rte_bitmap_set(pf->acl.slots, slot_id);
		ice_acl_del_entry(hw, entry_id);
		break;
	default:
		ret = rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 NULL, "Unsupported flow type.");
		break;
	}

	flow->rule = NULL;
	rte_free(rule);
	return ret;
}

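/* Free the software rule attached to @flow; HW state is untouched. */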
static void
ice_acl_filter_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
	flow->rule = NULL;
}

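/**
 * ice_acl_parse_action - parse the rte_flow action list
 *
 * Exactly one DROP action (plus any number of VOIDs) is accepted;
 * any other action or combination is rejected.
 */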
static int
ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error,
		     struct ice_acl_conf *filter)
{
	uint32_t dest_num = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Unsupported action combination");
		return -rte_errno;
	}

	return 0;
}

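/**
 * ice_acl_parse_pattern - parse rte_flow items into the ACL filter input
 *
 * Builds filter->input and filter->input_set from the ETH/IPV4/TCP/UDP/
 * SCTP items. All-ones masks on MAC addresses, IP addresses and L4
 * ports are rejected, as are masks on unsupported header fields.
 */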
static int
ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_acl_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src) ||
				    rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid mac addr mask");
					return -rte_errno;
				}

				if (!rte_is_zero_ether_addr(&eth_spec->src) &&
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					input_set |= ICE_INSET_SMAC;
					ice_memcpy(&filter->input.ext_data.src_mac,
						   &eth_spec->src,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
					ice_memcpy(&filter->input.ext_mask.src_mac,
						   &eth_mask->src,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
				}

				if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					input_set |= ICE_INSET_DMAC;
					ice_memcpy(&filter->input.ext_data.dst_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
					ice_memcpy(&filter->input.ext_mask.dst_mac,
						   &eth_mask->dst,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.src_addr == UINT32_MAX ||
				    ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.src_addr) {
					filter->input.ip.v4.src_ip =
						ipv4_spec->hdr.src_addr;
					filter->input.mask.v4.src_ip =
						ipv4_mask->hdr.src_addr;

					input_set |= ICE_INSET_IPV4_SRC;
				}

				if (ipv4_mask->hdr.dst_addr) {
					filter->input.ip.v4.dst_ip =
						ipv4_spec->hdr.dst_addr;
					filter->input.mask.v4.dst_ip =
						ipv4_mask->hdr.dst_addr;

					input_set |= ICE_INSET_IPV4_DST;
				}
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX ||
				    tcp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    tcp_mask->hdr.src_port) {
					input_set |= ICE_INSET_TCP_SRC_PORT;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						tcp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    tcp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_TCP_DST_PORT;
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						tcp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX ||
				    udp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    udp_mask->hdr.src_port) {
					input_set |= ICE_INSET_UDP_SRC_PORT;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						udp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    udp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_UDP_DST_PORT;
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						udp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.src_port == UINT16_MAX ||
				    sctp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    sctp_mask->hdr.src_port) {
					input_set |= ICE_INSET_SCTP_SRC_PORT;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						sctp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    sctp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_SCTP_DST_PORT;
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						sctp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Invalid pattern item.");
			return -rte_errno;
		}
	}

	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

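/**
 * ice_acl_parse - top-level pattern/action parser for the ACL engine
 *
 * Matches the pattern against ice_acl_pattern[], fills pf->acl.conf,
 * validates the input set against the matched template and parses the
 * actions. On success, *meta points at the PF-owned filter config.
 */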
static int
ice_acl_parse(struct ice_adapter *ad,
	      struct ice_pattern_match_item *array,
	      uint32_t array_len,
	      const struct rte_flow_item pattern[],
	      const struct rte_flow_action actions[],
	      uint32_t priority,
	      void **meta,
	      struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_acl_conf *filter = &pf->acl.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	if (priority >= 1)
		return -rte_errno;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(ad, pattern, array, array_len,
					     error);
	if (!item)
		return -rte_errno;

	ret = ice_acl_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask_o) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = ice_acl_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}

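/**
 * ice_acl_bitmap_init - allocate the free-slot bitmap
 *
 * All MAX_ACL_SLOTS_ID bits start set (all slots free); allocating a
 * slot clears a bit, and destroying a rule sets it again.
 */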
static int
ice_acl_bitmap_init(struct ice_pf *pf)
{
	uint32_t bmp_size;
	void *mem = NULL;
	struct rte_bitmap *slots;
	int ret = 0;

	bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
	mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
		return -rte_errno;
	}

	slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
	if (slots == NULL) {
		PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
		ret = -rte_errno;
		goto err_acl_mem_alloc;
	}
	pf->acl.slots = slots;
	return 0;

err_acl_mem_alloc:
	rte_free(mem);
	return ret;
}

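/**
 * ice_acl_init - initialize the ACL engine for a DCF-enabled adapter
 *
 * No-op unless the adapter runs as a DCF. Allocates profile memory,
 * creates the ACL table and scenario, sets up the slot bitmap, adds
 * the four IPv4 profiles and registers the flow parser.
 */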
static int
ice_acl_init(struct ice_adapter *ad)
{
	int ret = 0;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_parser *parser = &ice_acl_parser;

	if (!ad->hw.dcf_enabled)
		return 0;

	ret = ice_acl_prof_alloc(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for ACL profile.");
		return -ENOMEM;
	}

	ret = ice_acl_setup(pf);
	if (ret)
		return ret;

	ret = ice_acl_bitmap_init(pf);
	if (ret)
		return ret;

	ret = ice_acl_prof_init(pf);
	if (ret)
		return ret;

	return ice_register_parser(parser, ad);
}

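/* Free the per-ptype profile memory allocated by ice_acl_prof_alloc(). */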
static void
ice_acl_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
		rte_free(hw->acl_prof[ptype]);
		hw->acl_prof[ptype] = NULL;
	}

	rte_free(hw->acl_prof);
	hw->acl_prof = NULL;
}

static void
ice_acl_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_parser *parser = &ice_acl_parser;

	if (ad->hw.dcf_enabled) {
		ice_unregister_parser(parser, ad);
		ice_deinit_acl(pf);
		ice_acl_prof_free(hw);
	}
}

static struct
ice_flow_engine ice_acl_engine = {
	.init = ice_acl_init,
	.uninit = ice_acl_uninit,
	.create = ice_acl_create_filter,
	.destroy = ice_acl_destroy_filter,
	.free = ice_acl_filter_free,
	.type = ICE_FLOW_ENGINE_ACL,
};

static struct
ice_flow_parser ice_acl_parser = {
	.engine = &ice_acl_engine,
	.array = ice_acl_pattern,
	.array_len = RTE_DIM(ice_acl_pattern),
	.parse_pattern_action = ice_acl_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_acl_engine_init)
{
	struct ice_flow_engine *engine = &ice_acl_engine;
	ice_register_flow_engine(engine);
}