xref: /f-stack/dpdk/drivers/net/hns3/hns3_flow.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default hash keys */
static uint8_t hns3_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special filter id for non-specific packet flagging. Don't change value */
#define HNS3_MAX_FILTER_ID	0x0FFF

#define ETHER_TYPE_MASK		0xFFFF
#define IPPROTO_MASK		0xFF
#define TUNNEL_TYPE_MASK	0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE		0x6558

static enum rte_flow_item_type first_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type L2_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type tunnel_next_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
	enum rte_flow_item_type *items;
	int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = rte_be_to_cpu_32(src[i]);
}

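/*
 * Usage sketch (illustrative, not driver code): IPv6 addresses are carried
 * as four big-endian 32-bit words, so the IPv6 parser below converts them
 * word by word, e.g.:
 *
 *   uint32_t dst[IP_ADDR_LEN];
 *   net_addr_to_host(dst, (const rte_be32_t *)ipv6_spec->hdr.src_addr,
 *                    IP_ADDR_LEN);
 *
 * This assumes IP_ADDR_LEN counts 32-bit words (4 for IPv6).
 */
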
/*
 * This function is used to find the RSS general action.
 * 1. As we know, RSS is used to spread packets among several queues; the flow
 *    API provides struct rte_flow_action_rss, and the user can configure its
 *    fields such as func/level/types/key/queue to control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
 *    rule whose action is an RSS queue region.
 * 3. When the action is RSS, we use the following rule to distinguish:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, which
 *            indicates a queue region configuration.
 *    Other cases: a general RSS action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	const struct rte_flow_action *act = NULL;
	const struct hns3_rss_conf *rss;
	bool have_eth = false;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			act = actions;
			break;
		}
	}
	if (!act)
		return NULL;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
			have_eth = true;
			break;
		}
	}

	rss = act->conf;
	if (have_eth && rss->conf.queue_num) {
		/*
		 * The pattern has ETH and the action's queue_num > 0, which
		 * indicates a queue region configuration.
		 * Because queue region is implemented by FDIR + RSS in hns3
		 * hardware, it needs to go through the FDIR process, so
		 * return NULL here to avoid the RSS process.
		 */
		return NULL;
	}

	return act;
}

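/*
 * Illustrative examples (not driver code) of the distinction made above:
 *
 *   pattern: ETH / END
 *   actions: RSS { .queue_num = 4, .queue = {0, 1, 2, 3} } / END
 *
 * is treated as a queue region request, so this function returns NULL and
 * the rule goes through the FDIR path, whereas
 *
 *   pattern: IPV4 / END
 *   actions: RSS { .types = ETH_RSS_IP, .queue_num = 0 } / END
 *
 * is a general RSS action and is returned to the caller.
 */
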
static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	LIST_FOREACH(cnt, &pf->flow_counters, next) {
		if (cnt->id == id)
			return cnt;
	}
	return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
		 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt) {
		if (!cnt->shared || cnt->shared != shared)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				cnt,
				"Counter id is used, shared flag does not match");
		cnt->ref_cnt++;
		return 0;
	}

	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
	if (cnt == NULL)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
					  "Alloc mem for counter failed");
	cnt->id = id;
	cnt->shared = shared;
	cnt->ref_cnt = 1;
	cnt->hits = 0;
	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
	return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_query_count *qc,
		   struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_flow_counter *cnt;
	uint64_t value;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir is not supported in VF");
	cnt = hns3_counter_lookup(dev, flow->counter_id);
	if (cnt == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Can't find counter id");

	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Read counter fail.");
		return ret;
	}
	qc->hits_set = 1;
	qc->hits = value;

	return 0;
}

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt == NULL) {
		hns3_err(hw, "Can't find available counter to release");
		return -EINVAL;
	}
	cnt->ref_cnt--;
	if (cnt->ref_cnt == 0) {
		LIST_REMOVE(cnt, next);
		rte_free(cnt);
	}
	return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt_ptr;

	cnt_ptr = LIST_FIRST(&pf->flow_counters);
	while (cnt_ptr) {
		LIST_REMOVE(cnt_ptr, next);
		rte_free(cnt_ptr);
		cnt_ptr = LIST_FIRST(&pf->flow_counters);
	}
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct hns3_fdir_rule *rule,
			 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_queue *queue;
	struct hns3_hw *hw = &hns->hw;

	queue = (const struct rte_flow_action_queue *)action->conf;
	if (queue->index >= hw->used_rx_queues) {
		hns3_err(hw, "queue ID(%u) is greater than the number of "
			  "available queues (%u) in the driver.",
			  queue->index, hw->used_rx_queues);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  action, "Invalid queue ID in PF");
	}

	rule->queue_id = queue->index;
	rule->nb_queues = 1;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct hns3_fdir_rule *rule,
				struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_rss *conf = action->conf;
	struct hns3_hw *hw = &hns->hw;
	uint16_t idx;

	if (!hns3_dev_fd_queue_region_supported(hw))
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queue region configuration is not supported!");

	if ((!rte_is_power_of_2(conf->queue_num)) ||
		conf->queue_num > hw->rss_size_max ||
		conf->queue[0] >= hw->used_rx_queues ||
		conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
			"Invalid start queue ID and queue num! The start "
			"queue ID must be valid, and the queue num must be "
			"a power of 2 and <= rss_size_max.");
	}

	for (idx = 1; idx < conf->queue_num; idx++) {
		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
				"Invalid queue ID sequence! The queue IDs "
				"must increase continuously.");
	}

	rule->queue_id = conf->queue[0];
	rule->nb_queues = conf->queue_num;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}

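/*
 * Worked example (illustrative): assuming hw->used_rx_queues == 16 and
 * hw->rss_size_max >= 4, an RSS action with
 *
 *   .queue_num = 4, .queue = {8, 9, 10, 11}
 *
 * passes the checks above (power-of-2 size, contiguous in-range IDs) and
 * yields queue_id = 8, nb_queues = 4. A list such as {8, 10, 12, 14} is
 * rejected because the IDs are not consecutive.
 */
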
/*
 * Parse the actions from the provided action list.
 * The actions are validated as they are parsed.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *mark;
	struct hns3_pf *pf = &hns->pf;
	uint32_t counter_num;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = hns3_handle_action_queue(dev, actions, rule,
						       error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule->action = HNS3_FD_ACTION_DROP_PACKET;
			break;
		/*
		 * Here the RSS action's real purpose is a queue region.
		 * Queue region is implemented by FDIR + RSS in hns3 hardware:
		 * the FDIR action selects one queue region (start_queue_id
		 * and queue_num), then RSS spreads packets over that region
		 * by the RSS algorithm.
		 */
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = hns3_handle_action_queue_region(dev, actions,
							      rule, error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark =
			    (const struct rte_flow_action_mark *)actions->conf;
			if (mark->id >= HNS3_MAX_FILTER_ID)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						actions,
						"Invalid Mark ID");
			rule->fd_id = mark->id;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			rule->fd_id = HNS3_MAX_FILTER_ID;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
			    (const struct rte_flow_action_count *)actions->conf;
			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
			if (act_count->id >= counter_num)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						actions,
						"Invalid counter id");
			rule->act_cnt = *act_count;
			rule->flags |= HNS3_RULE_FLAG_COUNTER;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "Unsupported action");
		}
	}

	return 0;
}

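/*
 * Example action list accepted by the loop above (caller-side sketch, not
 * driver code): steer matching packets to queue 3, tag them with a mark
 * below HNS3_MAX_FILTER_ID and count them with counter 0.
 *
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action_mark mark = { .id = 0x0ABC };
 *   struct rte_flow_action_count count = { .id = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
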
/* Check whether the attributes of a flow director rule are supported. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr, "Ingress can't be zero");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "Not support egress");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  attr, "No support for transfer");
	if (attr->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  attr, "Not support priority");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  attr, "Not support group");
	return 0;
}

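/*
 * In practice (illustrative), the only attribute layout these checks accept
 * is a plain ingress rule:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *
 * Egress, transfer, nonzero priority and nonzero group are all rejected.
 */
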
static int
hns3_parse_eth(const struct rte_flow_item *item,
		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		eth_mask = item->mask;
		if (eth_mask->type) {
			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
			rule->key_conf.mask.ether_type =
			    rte_be_to_cpu_16(eth_mask->type);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
			memcpy(rule->key_conf.mask.src_mac,
			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
			memcpy(rule->key_conf.mask.dst_mac,
			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
	}

	eth_spec = item->spec;
	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	rule->key_conf.vlan_num++;
	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Vlan_num is more than 2");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		vlan_mask = item->mask;
		if (vlan_mask->tci) {
			if (rule->key_conf.vlan_num == 1) {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
					     1);
				rule->key_conf.mask.vlan_tag1 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			} else {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
					     1);
				rule->key_conf.mask.vlan_tag2 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			}
		}
	}

	vlan_spec = item->spec;
	if (rule->key_conf.vlan_num == 1)
		rule->key_conf.spec.vlan_tag1 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	else
		rule->key_conf.spec.vlan_tag2 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	return 0;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv4_mask = item->mask;
		if (ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst ip,tos,proto in IPV4");
		}

		if (ipv4_mask->hdr.src_addr) {
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
		}

		if (ipv4_mask->hdr.dst_addr) {
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
		}

		if (ipv4_mask->hdr.type_of_service) {
			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
			rule->key_conf.mask.ip_tos =
			    ipv4_mask->hdr.type_of_service;
		}

		if (ipv4_mask->hdr.next_proto_id) {
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
			rule->key_conf.mask.ip_proto =
			    ipv4_mask->hdr.next_proto_id;
		}
	}

	ipv4_spec = item->spec;
	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
	return 0;
}

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.hop_limits) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst ip,proto in IPV6");
		}
		net_addr_to_host(rule->key_conf.mask.src_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
				 IP_ADDR_LEN);
		net_addr_to_host(rule->key_conf.mask.dst_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
				 IP_ADDR_LEN);
		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
		if (ipv6_mask->hdr.proto)
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	}

	ipv6_spec = item->spec;
	net_addr_to_host(rule->key_conf.spec.src_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
			 IP_ADDR_LEN);
	net_addr_to_host(rule->key_conf.spec.dst_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
			 IP_ADDR_LEN);
	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

	return 0;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in TCP");
		}

		if (tcp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
		}
		if (tcp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
		}
	}

	tcp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in UDP");
		}
		if (udp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
		}
		if (udp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
		}
	}

	udp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		sctp_mask = item->mask;
		if (sctp_mask->hdr.cksum)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in SCTP");
		if (sctp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
		}
		if (sctp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
		}
		if (sctp_mask->hdr.tag) {
			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
			rule->key_conf.mask.sctp_tag =
			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
		}
	}

	sctp_spec = item->spec;
	rule->key_conf.spec.src_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

	return 0;
}

/*
 * Check items before the tunnel item, save inner configs to outer configs,
 * and clear the inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and
 * tunnel packet(1bit).
 * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
 * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
 * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
 * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
 * vlantag2(16bit) and sctp-tag(32bit).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	/* check eth config */
	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer eth mac is unsupported");
	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
		rule->key_conf.spec.outer_ether_type =
		    rule->key_conf.spec.ether_type;
		rule->key_conf.mask.outer_ether_type =
		    rule->key_conf.mask.ether_type;
		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
		rule->key_conf.spec.ether_type = 0;
		rule->key_conf.mask.ether_type = 0;
	}

	/* check vlan config */
	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Outer vlan tags are unsupported");

	/* clear vlan_num for inner vlan select */
	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
	rule->key_conf.vlan_num = 0;

	/* check L3 config */
	if (rule->input_set &
	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer ip is unsupported");
	if (rule->input_set & BIT(INNER_IP_PROTO)) {
		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
		rule->key_conf.spec.ip_proto = 0;
		rule->key_conf.mask.ip_proto = 0;
	}

	/* check L4 config */
	if (rule->input_set & BIT(INNER_SCTP_TAG))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Outer sctp tag is unsupported");

	if (rule->input_set & BIT(INNER_SRC_PORT)) {
		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
		rule->key_conf.spec.outer_src_port =
		    rule->key_conf.spec.src_port;
		rule->key_conf.mask.outer_src_port =
		    rule->key_conf.mask.src_port;
		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
		rule->key_conf.spec.src_port = 0;
		rule->key_conf.mask.src_port = 0;
	}
	if (rule->input_set & BIT(INNER_DST_PORT)) {
		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
		rule->key_conf.spec.dst_port = 0;
		rule->key_conf.mask.dst_port = 0;
	}
	return 0;
}

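/*
 * Illustrative effect of the moves above (not driver code): for a pattern
 * such as ETH / IPV4 / UDP / VXLAN / ETH / IPV4, the ether type, IP proto
 * and source port parsed before the VXLAN item are moved from the inner
 * fields (ether_type/ip_proto/src_port) to the corresponding outer fields,
 * the inner destination port is simply cleared, and the inner slots are
 * then free for the items that follow the tunnel header.
 */
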
static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
	else
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	vxlan_mask = item->mask;
	vxlan_spec = item->spec;

	if (vxlan_mask->flags)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Flags is not supported in VxLAN");

	/* VNI must be totally masked or not. */
	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "VNI must be totally masked or not in VxLAN");
	if (vxlan_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
		   VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
	rule->key_conf.mask.outer_proto = IPPROTO_MASK;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	nvgre_mask = item->mask;
	nvgre_spec = item->spec;

	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Ver/protocol is not supported in NVGRE");

	/* TNI must be totally masked or not. */
	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "TNI must be totally masked or not in NVGRE");

	if (nvgre_mask->tni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
		   VNI_OR_TNI_LEN);

	if (nvgre_mask->flow_id) {
		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
	}
	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
	return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	const struct rte_flow_item_geneve *geneve_spec;
	const struct rte_flow_item_geneve *geneve_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	geneve_mask = item->mask;
	geneve_spec = item->spec;

	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Ver/protocol is not supported in GENEVE");
	/* VNI must be totally masked or not. */
	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "VNI must be totally masked or not in GENEVE");
	if (geneve_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
			   VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
		   VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		ret = hns3_parse_vxlan(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		ret = hns3_parse_nvgre(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		ret = hns3_parse_geneve(item, rule, error);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "Unsupported tunnel type!");
	}
	if (ret)
		return ret;
	return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct items_step_mngr *step_mngr,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		ret = hns3_parse_eth(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		ret = hns3_parse_vlan(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ret = hns3_parse_ipv4(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ret = hns3_parse_ipv6(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		ret = hns3_parse_tcp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		ret = hns3_parse_udp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		ret = hns3_parse_sctp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "Unsupported normal type!");
	}

	return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
		   struct items_step_mngr step_mngr,
		   struct rte_flow_error *error)
{
	int i;

	if (item->last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
					  "Not supported last point for range");

	for (i = 0; i < step_mngr.count; i++) {
		if (item->type == step_mngr.items[i])
			break;
	}

	if (i == step_mngr.count) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Invalid or missing item");
	}
	return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
	    type == RTE_FLOW_ITEM_TYPE_MPLS)
		return true;
	return false;
}

/*
 * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
 * and collect the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct hns3_fdir_rule *rule,
		       struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_item *item;
	struct items_step_mngr step_mngr;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir not supported in VF");

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "fdir_conf.mode isn't perfect");

	step_mngr.items = first_items;
	step_mngr.count = ARRAY_SIZE(first_items);
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		ret = hns3_validate_item(item, step_mngr, error);
		if (ret)
			return ret;

		if (is_tunnel_packet(item->type)) {
			ret = hns3_parse_tunnel(item, rule, error);
			if (ret)
				return ret;
			step_mngr.items = tunnel_next_items;
			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
		} else {
			ret = hns3_parse_normal(item, rule, &step_mngr, error);
			if (ret)
				return ret;
		}
	}

	return hns3_handle_actions(dev, actions, rule, error);
}

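/*
 * Example of a pattern this parser accepts (caller-side sketch, not driver
 * code): match UDP packets from 192.168.1.20 on any destination.
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = RTE_BE32(UINT32_MAX),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * The bare ETH and UDP items only describe the protocol stack; the IPv4
 * spec/mask pair selects the source address.
 */
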
void
hns3_filterlist_init(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;

	TAILQ_INIT(&process_list->fdir_list);
	TAILQ_INIT(&process_list->filter_rss_list);
	TAILQ_INIT(&process_list->flow_list);
}

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_flow_mem *flow_node;

	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	while (fdir_rule_ptr) {
		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	}

	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	while (rss_filter_ptr) {
		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
			     entries);
		rte_free(rss_filter_ptr);
		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	}

	flow_node = TAILQ_FIRST(&process_list->flow_list);
	while (flow_node) {
		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
		rte_free(flow_node->flow);
		rte_free(flow_node);
		flow_node = TAILQ_FIRST(&process_list->flow_list);
	}
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
		     const struct rte_flow_action_rss *with)
{
	bool func_is_same;

	/*
	 * When the user flushes all RSS rules, the RSS func is set to the
	 * invalid value RTE_ETH_HASH_FUNCTION_MAX, so any valid RSS func in
	 * a flow created after the flush differs from it. Otherwise, when
	 * the user creates an RSS action with the func specified as
	 * RTE_ETH_HASH_FUNCTION_DEFAULT, the func is considered the same
	 * between consecutive RSS flows.
	 */
	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
		func_is_same = false;
	else
		func_is_same = (with->func ? (comp->func == with->func) : true);

	return (func_is_same &&
		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
		comp->level == with->level && comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		!memcmp(comp->key, with->key, with->key_len) &&
		!memcmp(comp->queue, with->queue,
			sizeof(*with->queue) * with->queue_num));
}

static int
hns3_rss_conf_copy(struct hns3_rss_conf *out,
		   const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	if (in->key == NULL && in->key_len)
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss) {
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
	};
	out->conf.queue = memcpy(out->queue, in->queue,
				sizeof(*in->queue) * in->queue_num);
	if (in->key)
		out->conf.key = memcpy(out->key, in->key, in->key_len);

	return 0;
}

/*
 * This function is used to parse and validate the RSS action.
 */
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
		      const struct rte_flow_action *actions,
		      struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rss_conf *rss_conf = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	const struct rte_flow_action *act;
	uint32_t act_index = 0;
	uint16_t n;

	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	rss = act->conf;

	if (rss == NULL) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  act, "no valid queues");
	}

	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
					  "queue number configured exceeds "
					  "the queue buffer size the driver supports");

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] < hw->alloc_rss_size)
			continue;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
					  "queue id must be less than queue number allocated to a TC");
	}

	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  act,
					  "Flow types are unsupported by "
					  "hns3's RSS");
	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
					  "RSS hash func is not supported");
	if (rss->level)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
					  "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
					  "RSS hash key must be exactly 40 bytes");

	/*
	 * For Kunpeng920 and Kunpeng930 NICs, the hardware cannot use the
	 * dst/src port fields for the RSS hash of the following packet
	 * types:
	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
	 * Besides, for Kunpeng920, the hardware cannot use the src/dst port
	 * fields for the RSS hash of the IPV6 SCTP packet type.
	 */
	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
	   (rss->types & ETH_RSS_IP ||
	   (!hw->rss_info.ipv6_sctp_offload_supported &&
	   rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "input RSS types are not supported");

	act_index++;

	/* Check if the next not void action is END */
	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "Not supported action.");
	}

	return 0;
}

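/*
 * Example of an RSS action that passes the checks above (illustrative,
 * assuming all four queue IDs are below hw->alloc_rss_size):
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .types = ETH_RSS_IP,
 *           .key_len = 0,
 *           .queue_num = 4,
 *           .queue = queues,
 *   };
 *
 * A zero key_len passes validation (the driver later falls back to its
 * default 40-byte key); a nonzero key_len must be exactly 40 bytes.
 */
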
static int
hns3_disable_rss(struct hns3_hw *hw)
{
	int ret;

	/* Redirect the redirection table to queue 0 */
	ret = hns3_rss_reset_indir_table(hw);
	if (ret)
		return ret;

	/* Disable RSS */
	hw->rss_info.conf.types = 0;
	hw->rss_dis_flag = true;

	return 0;
}

static void
hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
{
	if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
		hns3_warn(hw, "Default RSS hash key to be set");
		rss_conf->key = hns3_hash_key;
		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
	}
}

static int
hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
			 uint8_t *hash_algo)
{
	enum rte_eth_hash_function algo_func = *func;

	switch (algo_func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		/* Keep *hash_algo as what it used to be */
		algo_func = hw->rss_info.conf.func;
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
		break;
	default:
		hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
			 algo_func);
		return -EINVAL;
	}
	*func = algo_func;

	return 0;
}

static int
hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
{
	struct hns3_rss_tuple_cfg *tuple;
	int ret;

	hns3_parse_rss_key(hw, rss_config);

	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
				       &hw->rss_info.hash_algo);
	if (ret)
		return ret;

	ret = hns3_set_rss_algo_key(hw, rss_config->key);
	if (ret)
		return ret;

	/* Update algorithm of hw */
	hw->rss_info.conf.func = rss_config->func;

	/* Set flow type supported */
	tuple = &hw->rss_info.rss_tuple_sets;
	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
	if (ret)
		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);

	return ret;
}

static int
hns3_update_indir_table(struct rte_eth_dev *dev,
			const struct rte_flow_action_rss *conf, uint16_t num)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
	uint16_t j;
	uint32_t i;

	/* Fill in redirection table */
	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
	       sizeof(hw->rss_info.rss_indirection_tbl));
	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
		j %= num;
		if (conf->queue[j] >= hw->alloc_rss_size) {
			hns3_err(hw, "queue id(%u) set to redirection table "
				 "exceeds queue number(%u) allocated to a TC.",
				 conf->queue[j], hw->alloc_rss_size);
			return -EINVAL;
		}
		indir_tbl[i] = conf->queue[j];
	}

	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
}

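/*
 * Worked example (illustrative): with conf->queue = {4, 5, 6, 7} and
 * num = 4, the loop above fills the redirection table round-robin, so
 * indir_tbl becomes {4, 5, 6, 7, 4, 5, 6, 7, ...} across all
 * HNS3_RSS_IND_TBL_SIZE entries.
 */
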
1537 static int
hns3_config_rss_filter(struct rte_eth_dev * dev,const struct hns3_rss_conf * conf,bool add)1538 hns3_config_rss_filter(struct rte_eth_dev *dev,
1539 		       const struct hns3_rss_conf *conf, bool add)
1540 {
1541 	struct hns3_process_private *process_list = dev->process_private;
1542 	struct hns3_adapter *hns = dev->data->dev_private;
1543 	struct hns3_rss_conf_ele *rss_filter_ptr;
1544 	struct hns3_hw *hw = &hns->hw;
1545 	struct hns3_rss_conf *rss_info;
1546 	uint64_t flow_types;
1547 	uint16_t num;
1548 	int ret;
1549 
1550 	struct rte_flow_action_rss rss_flow_conf = {
1551 		.func = conf->conf.func,
1552 		.level = conf->conf.level,
1553 		.types = conf->conf.types,
1554 		.key_len = conf->conf.key_len,
1555 		.queue_num = conf->conf.queue_num,
1556 		.key = conf->conf.key_len ?
1557 		    (void *)(uintptr_t)conf->conf.key : NULL,
1558 		.queue = conf->conf.queue,
1559 	};
1560 
1561 	/* Filter out the unsupported flow types */
1562 	flow_types = conf->conf.types ?
1563 		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1564 		     hw->rss_info.conf.types;
1565 	if (flow_types != rss_flow_conf.types)
1566 		hns3_warn(hw, "modified RSS types based on hardware support, "
1567 			      "requested:%" PRIx64 " configured:%" PRIx64,
1568 			  rss_flow_conf.types, flow_types);
1569 	/* Update the useful flow types */
1570 	rss_flow_conf.types = flow_types;
1571 
1572 	rss_info = &hw->rss_info;
1573 	if (!add) {
1574 		if (!conf->valid)
1575 			return 0;
1576 
1577 		ret = hns3_disable_rss(hw);
1578 		if (ret) {
1579 			hns3_err(hw, "RSS disable failed(%d)", ret);
1580 			return ret;
1581 		}
1582 
1583 		if (rss_flow_conf.queue_num) {
1584 			/*
1585 			 * Because the content of the queue pointer has been
1586 			 * reset to 0, rss_info->conf.queue should be set to NULL.
1587 			 */
1588 			rss_info->conf.queue = NULL;
1589 			rss_info->conf.queue_num = 0;
1590 		}
1591 
1592 		/* Mark the RSS func invalid after the rules are flushed */
1593 		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1594 		return 0;
1595 	}
1596 
1597 	/* Set rx queues to use */
1598 	num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1599 	if (rss_flow_conf.queue_num > num)
1600 		hns3_warn(hw, "Configured queue number %u exceeds the maximum and is truncated",
1601 			  rss_flow_conf.queue_num);
1602 	hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
1603 
1604 	rte_spinlock_lock(&hw->lock);
1605 	if (num) {
1606 		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1607 		if (ret)
1608 			goto rss_config_err;
1609 	}
1610 
1611 	/* Set hash algorithm and flow types by the user's config */
1612 	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1613 	if (ret)
1614 		goto rss_config_err;
1615 
1616 	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1617 	if (ret) {
1618 		hns3_err(hw, "RSS config init fail(%d)", ret);
1619 		goto rss_config_err;
1620 	}
1621 
1622 	/*
1623 	 * When a new RSS rule is created, the old rule is overridden and
1624 	 * marked invalid.
1625 	 */
1626 	TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
1627 		rss_filter_ptr->filter_info.valid = false;
1628 
1629 rss_config_err:
1630 	rte_spinlock_unlock(&hw->lock);
1631 
1632 	return ret;
1633 }
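
/*
 * Editorial sketch, not driver code: the kind of rule an application
 * would submit to reach this path through rte_flow_create(). Port id 0,
 * the queue list, and the RSS types are hypothetical; the pattern is
 * empty because only the RSS action matters here.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = ETH_RSS_IP,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */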
1634 
1635 static int
1636 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1637 {
1638 	struct hns3_process_private *process_list = dev->process_private;
1639 	struct hns3_adapter *hns = dev->data->dev_private;
1640 	struct hns3_rss_conf_ele *rss_filter_ptr;
1641 	struct hns3_hw *hw = &hns->hw;
1642 	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1643 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1644 	int ret = 0;
1645 
1646 	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1647 	while (rss_filter_ptr) {
1648 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1649 			     entries);
1650 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1651 					     false);
1652 		if (ret)
1653 			rss_rule_fail_cnt++;
1654 		else
1655 			rss_rule_succ_cnt++;
1656 		rte_free(rss_filter_ptr);
1657 		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1658 	}
1659 
1660 	if (rss_rule_fail_cnt) {
1661 		hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1662 			     "fail num = %d", rss_rule_succ_cnt,
1663 			     rss_rule_fail_cnt);
1664 		ret = -EIO;
1665 	}
1666 
1667 	return ret;
1668 }
1669 
1670 int
1671 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1672 {
1673 	struct hns3_adapter *hns = dev->data->dev_private;
1674 	struct hns3_hw *hw = &hns->hw;
1675 
1676 	/* No need to restore the RSS rule when the user flushes all rules */
1677 	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1678 		return 0;
1679 
1680 	return hns3_config_rss_filter(dev, &hw->rss_info, true);
1681 }
1682 
1683 static int
1684 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1685 		    const struct hns3_rss_conf *conf, bool add)
1686 {
1687 	struct hns3_adapter *hns = dev->data->dev_private;
1688 	struct hns3_hw *hw = &hns->hw;
1689 	bool ret;
1690 
1691 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1692 	if (ret) {
1693 		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1694 		return -EINVAL;
1695 	}
1696 
1697 	return hns3_config_rss_filter(dev, conf, add);
1698 }
1699 
1700 static int
1701 hns3_flow_args_check(const struct rte_flow_attr *attr,
1702 		     const struct rte_flow_item pattern[],
1703 		     const struct rte_flow_action actions[],
1704 		     struct rte_flow_error *error)
1705 {
1706 	if (pattern == NULL)
1707 		return rte_flow_error_set(error, EINVAL,
1708 					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1709 					  NULL, "NULL pattern.");
1710 
1711 	if (actions == NULL)
1712 		return rte_flow_error_set(error, EINVAL,
1713 					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1714 					  NULL, "NULL action.");
1715 
1716 	if (attr == NULL)
1717 		return rte_flow_error_set(error, EINVAL,
1718 					  RTE_FLOW_ERROR_TYPE_ATTR,
1719 					  NULL, "NULL attribute.");
1720 
1721 	return hns3_check_attr(attr, error);
1722 }
1723 
1724 /*
1725  * Check if the flow rule is supported by hns3.
1726  * It only checks the format. It does not guarantee that the rule can be
1727  * programmed into the HW, because there may not be enough room for it.
1728  */
1729 static int
1730 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1731 		   const struct rte_flow_item pattern[],
1732 		   const struct rte_flow_action actions[],
1733 		   struct rte_flow_error *error)
1734 {
1735 	struct hns3_fdir_rule fdir_rule;
1736 	int ret;
1737 
1738 	ret = hns3_flow_args_check(attr, pattern, actions, error);
1739 	if (ret)
1740 		return ret;
1741 
1742 	if (hns3_find_rss_general_action(pattern, actions))
1743 		return hns3_parse_rss_filter(dev, actions, error);
1744 
1745 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1746 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1747 }
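
/*
 * Editorial sketch, not driver code: applications usually validate a rule
 * before creating it; the call below lands here through the generic flow
 * API. port_id, attr, pattern and actions are assumed to be built as in
 * the creation sketches elsewhere in this file.
 *
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (ret == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */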
1748 
1749 /*
1750  * Create or destroy a flow rule.
1751  * Theoretically one rule can match more than one filter.
1752  * We will let it use the first filter it hits.
1753  * So, the sequence matters.
1754  */
1755 static struct rte_flow *
1756 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1757 		 const struct rte_flow_item pattern[],
1758 		 const struct rte_flow_action actions[],
1759 		 struct rte_flow_error *error)
1760 {
1761 	struct hns3_process_private *process_list = dev->process_private;
1762 	struct hns3_adapter *hns = dev->data->dev_private;
1763 	struct hns3_hw *hw = &hns->hw;
1764 	const struct hns3_rss_conf *rss_conf;
1765 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1766 	struct hns3_rss_conf_ele *rss_filter_ptr;
1767 	struct hns3_flow_mem *flow_node;
1768 	const struct rte_flow_action *act;
1769 	struct rte_flow *flow;
1770 	struct hns3_fdir_rule fdir_rule;
1771 	int ret;
1772 
1773 	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1774 	if (ret)
1775 		return NULL;
1776 
1777 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1778 	if (flow == NULL) {
1779 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1780 				   NULL, "Failed to allocate flow memory");
1781 		return NULL;
1782 	}
1783 	flow_node = rte_zmalloc("hns3 flow node",
1784 				sizeof(struct hns3_flow_mem), 0);
1785 	if (flow_node == NULL) {
1786 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1787 				   NULL, "Failed to allocate flow list memory");
1788 		rte_free(flow);
1789 		return NULL;
1790 	}
1791 
1792 	flow_node->flow = flow;
1793 	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1794 
1795 	act = hns3_find_rss_general_action(pattern, actions);
1796 	if (act) {
1797 		rss_conf = act->conf;
1798 
1799 		ret = hns3_flow_parse_rss(dev, rss_conf, true);
1800 		if (ret)
1801 			goto err;
1802 
1803 		rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1804 					     sizeof(struct hns3_rss_conf_ele),
1805 					     0);
1806 		if (rss_filter_ptr == NULL) {
1807 			hns3_err(hw,
1808 				 "Failed to allocate hns3_rss_filter memory");
1809 			ret = -ENOMEM;
1810 			goto err;
1811 		}
1812 		hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1813 				   &rss_conf->conf);
1814 		rss_filter_ptr->filter_info.valid = true;
1815 		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1816 				  rss_filter_ptr, entries);
1817 
1818 		flow->rule = rss_filter_ptr;
1819 		flow->filter_type = RTE_ETH_FILTER_HASH;
1820 		return flow;
1821 	}
1822 
1823 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1824 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1825 	if (ret)
1826 		goto out;
1827 
1828 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1829 		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1830 				       fdir_rule.act_cnt.id, error);
1831 		if (ret)
1832 			goto out;
1833 
1834 		flow->counter_id = fdir_rule.act_cnt.id;
1835 	}
1836 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1837 	if (!ret) {
1838 		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1839 					    sizeof(struct hns3_fdir_rule_ele),
1840 					    0);
1841 		if (fdir_rule_ptr == NULL) {
1842 			hns3_err(hw, "Failed to allocate fdir_rule memory");
1843 			ret = -ENOMEM;
1844 			goto err_fdir;
1845 		}
1846 
1847 		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1848 			sizeof(struct hns3_fdir_rule));
1849 		TAILQ_INSERT_TAIL(&process_list->fdir_list,
1850 				  fdir_rule_ptr, entries);
1851 		flow->rule = fdir_rule_ptr;
1852 		flow->filter_type = RTE_ETH_FILTER_FDIR;
1853 
1854 		return flow;
1855 	}
1856 
1857 err_fdir:
1858 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1859 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
1860 
1861 err:
1862 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1863 			   "Failed to create flow");
1864 out:
1865 	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1866 	rte_free(flow_node);
1867 	rte_free(flow);
1868 	return NULL;
1869 }
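
/*
 * Editorial sketch, not driver code: a hypothetical FDIR-style rule that
 * takes the non-RSS branch above, steering IPv4 packets for one
 * destination address to queue 3 with a counter attached (which sets
 * HNS3_RULE_FLAG_COUNTER in the parsed rule).
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_count count = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */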
1870 
1871 /* Destroy a flow rule on hns3. */
1872 static int
1873 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1874 		  struct rte_flow_error *error)
1875 {
1876 	struct hns3_process_private *process_list = dev->process_private;
1877 	struct hns3_adapter *hns = dev->data->dev_private;
1878 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
1879 	struct hns3_rss_conf_ele *rss_filter_ptr;
1880 	struct hns3_flow_mem *flow_node;
1881 	enum rte_filter_type filter_type;
1882 	struct hns3_fdir_rule fdir_rule;
1883 	int ret;
1884 
1885 	if (flow == NULL)
1886 		return rte_flow_error_set(error, EINVAL,
1887 					  RTE_FLOW_ERROR_TYPE_HANDLE,
1888 					  flow, "Flow is NULL");
1889 	filter_type = flow->filter_type;
1890 	switch (filter_type) {
1891 	case RTE_ETH_FILTER_FDIR:
1892 		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1893 		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1894 			   sizeof(struct hns3_fdir_rule));
1895 
1896 		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1897 		if (ret)
1898 			return rte_flow_error_set(error, EIO,
1899 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1900 						  flow,
1901 						  "Failed to destroy the FDIR rule, try again");
1902 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1903 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
1904 		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1905 		rte_free(fdir_rule_ptr);
1906 		fdir_rule_ptr = NULL;
1907 		break;
1908 	case RTE_ETH_FILTER_HASH:
1909 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1910 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1911 					     false);
1912 		if (ret)
1913 			return rte_flow_error_set(error, EIO,
1914 						  RTE_FLOW_ERROR_TYPE_HANDLE,
1915 						  flow,
1916 						  "Failed to destroy the RSS rule, try again");
1917 		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1918 			     entries);
1919 		rte_free(rss_filter_ptr);
1920 		rss_filter_ptr = NULL;
1921 		break;
1922 	default:
1923 		return rte_flow_error_set(error, EINVAL,
1924 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1925 					  "Unsupported filter type");
1926 	}
1927 
1928 	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1929 		if (flow_node->flow == flow) {
1930 			TAILQ_REMOVE(&process_list->flow_list, flow_node,
1931 				     entries);
1932 			rte_free(flow_node);
1933 			flow_node = NULL;
1934 			break;
1935 		}
1936 	}
1937 	rte_free(flow);
1938 	flow = NULL;
1939 
1940 	return 0;
1941 }
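
/*
 * Editorial sketch, not driver code: tearing down a rule handle returned
 * by rte_flow_create(). port_id and flow are assumed from the creation
 * sketches above; on success the handle is freed and must not be reused.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_destroy(port_id, flow, &err) == 0)
 *		flow = NULL;
 */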
1942 
1943 /* Destroy all flow rules associated with a port on hns3. */
1944 static int
1945 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1946 {
1947 	struct hns3_adapter *hns = dev->data->dev_private;
1948 	int ret;
1949 
1950 	/* FDIR is available only in PF driver */
1951 	if (!hns->is_vf) {
1952 		ret = hns3_clear_all_fdir_filter(hns);
1953 		if (ret) {
1954 			rte_flow_error_set(error, ret,
1955 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1956 					   NULL, "Failed to flush rule");
1957 			return ret;
1958 		}
1959 		hns3_counter_flush(dev);
1960 	}
1961 
1962 	ret = hns3_clear_rss_filter(dev);
1963 	if (ret) {
1964 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1965 				   NULL, "Failed to flush rss filter");
1966 		return ret;
1967 	}
1968 
1969 	hns3_filterlist_flush(dev);
1970 
1971 	return 0;
1972 }
1973 
1974 /* Query an existing flow rule. */
1975 static int
1976 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1977 		const struct rte_flow_action *actions, void *data,
1978 		struct rte_flow_error *error)
1979 {
1980 	struct rte_flow_action_rss *rss_conf;
1981 	struct hns3_rss_conf_ele *rss_rule;
1982 	struct rte_flow_query_count *qc;
1983 	int ret;
1984 
1985 	if (!flow->rule)
1986 		return rte_flow_error_set(error, EINVAL,
1987 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1988 
1989 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1990 		switch (actions->type) {
1991 		case RTE_FLOW_ACTION_TYPE_VOID:
1992 			break;
1993 		case RTE_FLOW_ACTION_TYPE_COUNT:
1994 			qc = (struct rte_flow_query_count *)data;
1995 			ret = hns3_counter_query(dev, flow, qc, error);
1996 			if (ret)
1997 				return ret;
1998 			break;
1999 		case RTE_FLOW_ACTION_TYPE_RSS:
2000 			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2001 				return rte_flow_error_set(error, ENOTSUP,
2002 					RTE_FLOW_ERROR_TYPE_ACTION,
2003 					actions, "action is not supported");
2004 			}
2005 			rss_conf = (struct rte_flow_action_rss *)data;
2006 			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2007 			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2008 				   sizeof(struct rte_flow_action_rss));
2009 			break;
2010 		default:
2011 			return rte_flow_error_set(error, ENOTSUP,
2012 				RTE_FLOW_ERROR_TYPE_ACTION,
2013 				actions, "action is not supported");
2014 		}
2015 	}
2016 
2017 	return 0;
2018 }
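
/*
 * Editorial sketch, not driver code: reading back the counter attached in
 * the FDIR creation sketch above. The COUNT action passed here only
 * selects what to query; the statistics come back in qc.
 *
 *	struct rte_flow_query_count qc = { .reset = 0 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *	if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("hits: %" PRIu64 "\n", qc.hits);
 */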
2019 
2020 static const struct rte_flow_ops hns3_flow_ops = {
2021 	.validate = hns3_flow_validate,
2022 	.create = hns3_flow_create,
2023 	.destroy = hns3_flow_destroy,
2024 	.flush = hns3_flow_flush,
2025 	.query = hns3_flow_query,
2026 	.isolate = NULL,
2027 };
2028 
2029 /*
2030  * The entry point of the flow API.
2031  * @param dev
2032  *   Pointer to Ethernet device.
2033  * @return
2034  *   0 on success, a negative errno value otherwise.
2035  */
2036 int
2037 hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
2038 		     enum rte_filter_op filter_op, void *arg)
2039 {
2040 	struct hns3_hw *hw;
2041 	int ret = 0;
2042 
2043 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2044 	switch (filter_type) {
2045 	case RTE_ETH_FILTER_GENERIC:
2046 		if (filter_op != RTE_ETH_FILTER_GET)
2047 			return -EINVAL;
2048 		if (hw->adapter_state >= HNS3_NIC_CLOSED)
2049 			return -ENODEV;
2050 		*(const void **)arg = &hns3_flow_ops;
2051 		break;
2052 	default:
2053 		hns3_err(hw, "Filter type (%d) not supported", filter_type);
2054 		ret = -EOPNOTSUPP;
2055 		break;
2056 	}
2057 
2058 	return ret;
2059 }
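
/*
 * Editorial sketch, not driver code: how the generic flow layer reaches
 * the ops table above. Every rte_flow_*() call first fetches the driver's
 * rte_flow_ops through the (legacy) filter_ctrl hook, then dispatches to
 * the matching callback.
 *
 *	const struct rte_flow_ops *ops = NULL;
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *					  RTE_ETH_FILTER_GET, &ops);
 *	if (ret == 0 && ops != NULL)
 *		ret = ops->validate(dev, attr, pattern, actions, error);
 */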
2060