1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_malloc.h>
7 #include <rte_tailq.h>
8 
9 #include "base/i40e_prototype.h"
10 #include "base/i40e_dcb.h"
11 #include "i40e_ethdev.h"
12 #include "i40e_pf.h"
13 #include "i40e_rxtx.h"
14 #include "rte_pmd_i40e.h"
15 
16 int
17 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
18 {
19 	struct rte_eth_dev *dev;
20 	struct i40e_pf *pf;
21 
22 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
23 
24 	dev = &rte_eth_devices[port];
25 
26 	if (!is_i40e_supported(dev))
27 		return -ENOTSUP;
28 
29 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
30 
31 	if (vf >= pf->vf_num || !pf->vfs) {
32 		PMD_DRV_LOG(ERR, "Invalid argument.");
33 		return -EINVAL;
34 	}
35 
36 	i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
37 
38 	return 0;
39 }
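
/*
 * Usage sketch (illustrative only, not part of the driver): a host
 * application could notify VF 0 on port 0 of its link status as shown
 * below. The port and VF numbers are assumptions for the example.
 *
 *	int ret = rte_pmd_i40e_ping_vfs(0, 0);
 *	if (ret == -ENOTSUP)
 *		printf("port 0 is not driven by the i40e PMD\n");
 *	else if (ret == -EINVAL)
 *		printf("VF 0 does not exist on port 0\n");
 */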
40 
41 int
42 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
43 {
44 	struct rte_eth_dev *dev;
45 	struct i40e_pf *pf;
46 	struct i40e_vsi *vsi;
47 	struct i40e_hw *hw;
48 	struct i40e_vsi_context ctxt;
49 	int ret;
50 
51 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52 
53 	dev = &rte_eth_devices[port];
54 
55 	if (!is_i40e_supported(dev))
56 		return -ENOTSUP;
57 
58 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
59 
60 	if (vf_id >= pf->vf_num || !pf->vfs) {
61 		PMD_DRV_LOG(ERR, "Invalid argument.");
62 		return -EINVAL;
63 	}
64 
65 	vsi = pf->vfs[vf_id].vsi;
66 	if (!vsi) {
67 		PMD_DRV_LOG(ERR, "Invalid VSI.");
68 		return -EINVAL;
69 	}
70 
71 	/* Check if it has been already on or off */
72 	if (vsi->info.valid_sections &
73 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
74 		if (on) {
75 			if ((vsi->info.sec_flags &
76 			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
77 			    I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
78 				return 0; /* already on */
79 		} else {
80 			if ((vsi->info.sec_flags &
81 			     I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
82 				return 0; /* already off */
83 		}
84 	}
85 
86 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
87 	if (on)
88 		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
89 	else
90 		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
91 
92 	memset(&ctxt, 0, sizeof(ctxt));
93 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
94 	ctxt.seid = vsi->seid;
95 
96 	hw = I40E_VSI_TO_HW(vsi);
97 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
98 	if (ret != I40E_SUCCESS) {
99 		ret = -ENOTSUP;
100 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
101 	}
102 
103 	return ret;
104 }
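
/*
 * Usage sketch (assumption: port 0 with at least one VF configured):
 * enable MAC anti-spoof checking on VF 1. The call returns 0 without
 * touching hardware if the check is already in the requested state.
 *
 *	if (rte_pmd_i40e_set_vf_mac_anti_spoof(0, 1, 1) != 0)
 *		printf("failed to enable MAC anti-spoof on VF 1\n");
 */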
105 
106 static int
107 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
108 {
109 	uint32_t j, k;
110 	uint16_t vlan_id;
111 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
112 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
113 	int ret;
114 
115 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
116 		if (!vsi->vfta[j])
117 			continue;
118 
119 		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
120 			if (!(vsi->vfta[j] & (1 << k)))
121 				continue;
122 
123 			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
124 			if (!vlan_id)
125 				continue;
126 
127 			vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
128 			if (add)
129 				ret = i40e_aq_add_vlan(hw, vsi->seid,
130 						       &vlan_data, 1, NULL);
131 			else
132 				ret = i40e_aq_remove_vlan(hw, vsi->seid,
133 							  &vlan_data, 1, NULL);
134 			if (ret != I40E_SUCCESS) {
135 				PMD_DRV_LOG(ERR,
136 					    "Failed to add/rm vlan filter");
137 				return ret;
138 			}
139 		}
140 	}
141 
142 	return I40E_SUCCESS;
143 }
144 
145 int
146 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
147 {
148 	struct rte_eth_dev *dev;
149 	struct i40e_pf *pf;
150 	struct i40e_vsi *vsi;
151 	struct i40e_hw *hw;
152 	struct i40e_vsi_context ctxt;
153 	int ret;
154 
155 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
156 
157 	dev = &rte_eth_devices[port];
158 
159 	if (!is_i40e_supported(dev))
160 		return -ENOTSUP;
161 
162 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
163 
164 	if (vf_id >= pf->vf_num || !pf->vfs) {
165 		PMD_DRV_LOG(ERR, "Invalid argument.");
166 		return -EINVAL;
167 	}
168 
169 	vsi = pf->vfs[vf_id].vsi;
170 	if (!vsi) {
171 		PMD_DRV_LOG(ERR, "Invalid VSI.");
172 		return -EINVAL;
173 	}
174 
175 	/* Check if it has been already on or off */
176 	if (vsi->vlan_anti_spoof_on == on)
177 		return 0; /* already on or off */
178 
179 	vsi->vlan_anti_spoof_on = on;
180 	if (!vsi->vlan_filter_on) {
181 		ret = i40e_add_rm_all_vlan_filter(vsi, on);
182 		if (ret) {
183 			PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
184 			return -ENOTSUP;
185 		}
186 	}
187 
188 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
189 	if (on)
190 		vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
191 	else
192 		vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
193 
194 	memset(&ctxt, 0, sizeof(ctxt));
195 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
196 	ctxt.seid = vsi->seid;
197 
198 	hw = I40E_VSI_TO_HW(vsi);
199 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
200 	if (ret != I40E_SUCCESS) {
201 		ret = -ENOTSUP;
202 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
203 	}
204 
205 	return ret;
206 }
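
/*
 * Usage sketch (illustrative): enable VLAN anti-spoof on VF 1 of port 0.
 * Note that when per-VF VLAN filtering is off, the current VFTA entries
 * are also added to or removed from hardware via
 * i40e_add_rm_all_vlan_filter().
 *
 *	if (rte_pmd_i40e_set_vf_vlan_anti_spoof(0, 1, 1) != 0)
 *		printf("failed to enable VLAN anti-spoof on VF 1\n");
 */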
207 
208 static int
209 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
210 {
211 	struct i40e_mac_filter *f;
212 	struct i40e_macvlan_filter *mv_f;
213 	int i, vlan_num;
214 	enum i40e_mac_filter_type filter_type;
215 	int ret = I40E_SUCCESS;
216 	void *temp;
217 
218 	/* remove all the MACs */
219 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
220 		vlan_num = vsi->vlan_num;
221 		filter_type = f->mac_info.filter_type;
222 		if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
223 		    filter_type == I40E_MACVLAN_HASH_MATCH) {
224 			if (vlan_num == 0) {
225 				PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
226 				return I40E_ERR_PARAM;
227 			}
228 		} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
229 			   filter_type == I40E_MAC_HASH_MATCH)
230 			vlan_num = 1;
231 
232 		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
233 		if (!mv_f) {
234 			PMD_DRV_LOG(ERR, "failed to allocate memory");
235 			return I40E_ERR_NO_MEMORY;
236 		}
237 
238 		for (i = 0; i < vlan_num; i++) {
239 			mv_f[i].filter_type = filter_type;
240 			rte_memcpy(&mv_f[i].macaddr,
241 					 &f->mac_info.mac_addr,
242 					 ETH_ADDR_LEN);
243 		}
244 		if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
245 		    filter_type == I40E_MACVLAN_HASH_MATCH) {
246 			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
247 							 &f->mac_info.mac_addr);
248 			if (ret != I40E_SUCCESS) {
249 				rte_free(mv_f);
250 				return ret;
251 			}
252 		}
253 
254 		ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
255 		if (ret != I40E_SUCCESS) {
256 			rte_free(mv_f);
257 			return ret;
258 		}
259 
260 		rte_free(mv_f);
261 		ret = I40E_SUCCESS;
262 	}
263 
264 	return ret;
265 }
266 
267 static int
268 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
269 {
270 	struct i40e_mac_filter *f;
271 	struct i40e_macvlan_filter *mv_f;
272 	int i, vlan_num = 0;
273 	int ret = I40E_SUCCESS;
274 	void *temp;
275 
276 	/* restore all the MACs */
277 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
278 		if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH ||
279 		    f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) {
280 			/**
281 			 * If vlan_num is 0, that's the first time to add mac,
282 			 * set mask for vlan_id 0.
283 			 */
284 			if (vsi->vlan_num == 0) {
285 				i40e_set_vlan_filter(vsi, 0, 1);
286 				vsi->vlan_num = 1;
287 			}
288 			vlan_num = vsi->vlan_num;
289 		} else if (f->mac_info.filter_type == I40E_MAC_PERFECT_MATCH ||
290 			   f->mac_info.filter_type == I40E_MAC_HASH_MATCH)
291 			vlan_num = 1;
292 
293 		mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
294 		if (!mv_f) {
295 			PMD_DRV_LOG(ERR, "failed to allocate memory");
296 			return I40E_ERR_NO_MEMORY;
297 		}
298 
299 		for (i = 0; i < vlan_num; i++) {
300 			mv_f[i].filter_type = f->mac_info.filter_type;
301 			rte_memcpy(&mv_f[i].macaddr,
302 					 &f->mac_info.mac_addr,
303 					 ETH_ADDR_LEN);
304 		}
305 
306 		if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH ||
307 		    f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) {
308 			ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
309 							 &f->mac_info.mac_addr);
310 			if (ret != I40E_SUCCESS) {
311 				rte_free(mv_f);
312 				return ret;
313 			}
314 		}
315 
316 		ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
317 		if (ret != I40E_SUCCESS) {
318 			rte_free(mv_f);
319 			return ret;
320 		}
321 
322 		rte_free(mv_f);
323 		ret = I40E_SUCCESS;
324 	}
325 
326 	return ret;
327 }
328 
329 static int
330 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
331 {
332 	struct i40e_vsi_context ctxt;
333 	struct i40e_hw *hw;
334 	int ret;
335 
336 	if (!vsi)
337 		return -EINVAL;
338 
339 	hw = I40E_VSI_TO_HW(vsi);
340 
341 	/* Use the FW API if FW >= v5.0 */
342 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
343 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
344 		return -ENOTSUP;
345 	}
346 
347 	/* Check if it has been already on or off */
348 	if (vsi->info.valid_sections &
349 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
350 		if (on) {
351 			if ((vsi->info.switch_id &
352 			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
353 			    I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
354 				return 0; /* already on */
355 		} else {
356 			if ((vsi->info.switch_id &
357 			     I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
358 				return 0; /* already off */
359 		}
360 	}
361 
362 	/* remove all the MAC and VLAN first */
363 	ret = i40e_vsi_rm_mac_filter(vsi);
364 	if (ret) {
365 		PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
366 		return ret;
367 	}
368 	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
369 		ret = i40e_add_rm_all_vlan_filter(vsi, 0);
370 		if (ret) {
371 			PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
372 			return ret;
373 		}
374 	}
375 
376 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
377 	if (on)
378 		vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
379 	else
380 		vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
381 
382 	memset(&ctxt, 0, sizeof(ctxt));
383 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
384 	ctxt.seid = vsi->seid;
385 
386 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
387 	if (ret != I40E_SUCCESS) {
388 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
389 		return ret;
390 	}
391 
392 	/* add all the MAC and VLAN back */
393 	ret = i40e_vsi_restore_mac_filter(vsi);
394 	if (ret)
395 		return ret;
396 	if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397 		ret = i40e_add_rm_all_vlan_filter(vsi, 1);
398 		if (ret)
399 			return ret;
400 	}
401 
402 	return ret;
403 }
404 
405 int
406 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
407 {
408 	struct rte_eth_dev *dev;
409 	struct i40e_pf *pf;
410 	struct i40e_pf_vf *vf;
411 	struct i40e_vsi *vsi;
412 	uint16_t vf_id;
413 	int ret;
414 
415 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
416 
417 	dev = &rte_eth_devices[port];
418 
419 	if (!is_i40e_supported(dev))
420 		return -ENOTSUP;
421 
422 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
423 
424 	/* setup PF TX loopback */
425 	vsi = pf->main_vsi;
426 	ret = i40e_vsi_set_tx_loopback(vsi, on);
427 	if (ret)
428 		return -ENOTSUP;
429 
430 	/* setup TX loopback for all the VFs */
431 	if (!pf->vfs) {
432 		/* if no VF, do nothing. */
433 		return 0;
434 	}
435 
436 	for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
437 		vf = &pf->vfs[vf_id];
438 		vsi = vf->vsi;
439 
440 		ret = i40e_vsi_set_tx_loopback(vsi, on);
441 		if (ret)
442 			return -ENOTSUP;
443 	}
444 
445 	return ret;
446 }
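
/*
 * Usage sketch (illustrative): enable TX loopback on port 0. The setting
 * is applied to the PF main VSI first and then to every VF VSI, so the
 * call affects the whole port.
 *
 *	if (rte_pmd_i40e_set_tx_loopback(0, 1) != 0)
 *		printf("failed to enable TX loopback on port 0\n");
 */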
447 
448 int
449 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
450 {
451 	struct rte_eth_dev *dev;
452 	struct i40e_pf *pf;
453 	struct i40e_vsi *vsi;
454 	struct i40e_hw *hw;
455 	int ret;
456 
457 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
458 
459 	dev = &rte_eth_devices[port];
460 
461 	if (!is_i40e_supported(dev))
462 		return -ENOTSUP;
463 
464 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
465 
466 	if (vf_id >= pf->vf_num || !pf->vfs) {
467 		PMD_DRV_LOG(ERR, "Invalid argument.");
468 		return -EINVAL;
469 	}
470 
471 	vsi = pf->vfs[vf_id].vsi;
472 	if (!vsi) {
473 		PMD_DRV_LOG(ERR, "Invalid VSI.");
474 		return -EINVAL;
475 	}
476 
477 	hw = I40E_VSI_TO_HW(vsi);
478 
479 	ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
480 						  on, NULL, true);
481 	if (ret != I40E_SUCCESS) {
482 		ret = -ENOTSUP;
483 		PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
484 	}
485 
486 	return ret;
487 }
488 
489 int
490 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
491 {
492 	struct rte_eth_dev *dev;
493 	struct i40e_pf *pf;
494 	struct i40e_vsi *vsi;
495 	struct i40e_hw *hw;
496 	int ret;
497 
498 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
499 
500 	dev = &rte_eth_devices[port];
501 
502 	if (!is_i40e_supported(dev))
503 		return -ENOTSUP;
504 
505 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
506 
507 	if (vf_id >= pf->vf_num || !pf->vfs) {
508 		PMD_DRV_LOG(ERR, "Invalid argument.");
509 		return -EINVAL;
510 	}
511 
512 	vsi = pf->vfs[vf_id].vsi;
513 	if (!vsi) {
514 		PMD_DRV_LOG(ERR, "Invalid VSI.");
515 		return -EINVAL;
516 	}
517 
518 	hw = I40E_VSI_TO_HW(vsi);
519 
520 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
521 						    on, NULL);
522 	if (ret != I40E_SUCCESS) {
523 		ret = -ENOTSUP;
524 		PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
525 	}
526 
527 	return ret;
528 }
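
/*
 * Usage sketch (illustrative): put VF 2 of port 0 into fully promiscuous
 * mode by enabling both unicast and multicast promiscuous reception.
 * The port and VF numbers are assumptions for the example.
 *
 *	if (rte_pmd_i40e_set_vf_unicast_promisc(0, 2, 1) != 0 ||
 *	    rte_pmd_i40e_set_vf_multicast_promisc(0, 2, 1) != 0)
 *		printf("failed to enable promiscuous mode on VF 2\n");
 */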
529 
530 int
531 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
532 			     struct rte_ether_addr *mac_addr)
533 {
534 	struct i40e_mac_filter *f;
535 	struct rte_eth_dev *dev;
536 	struct i40e_pf_vf *vf;
537 	struct i40e_vsi *vsi;
538 	struct i40e_pf *pf;
539 	void *temp;
540 
541 	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
542 		return -EINVAL;
543 
544 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
545 
546 	dev = &rte_eth_devices[port];
547 
548 	if (!is_i40e_supported(dev))
549 		return -ENOTSUP;
550 
551 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
552 
553 	if (vf_id >= pf->vf_num || !pf->vfs)
554 		return -EINVAL;
555 
556 	vf = &pf->vfs[vf_id];
557 	vsi = vf->vsi;
558 	if (!vsi) {
559 		PMD_DRV_LOG(ERR, "Invalid VSI.");
560 		return -EINVAL;
561 	}
562 
563 	rte_ether_addr_copy(mac_addr, &vf->mac_addr);
564 
565 	/* Remove all existing mac */
566 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
567 		if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
568 				!= I40E_SUCCESS)
569 			PMD_DRV_LOG(WARNING, "Delete MAC failed");
570 
571 	return 0;
572 }
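
/*
 * Usage sketch (illustrative): assign a fixed MAC address to VF 0 of
 * port 0. The address below is an example value. This call only records
 * the address on the PF side and flushes the existing MAC filters; the
 * new address typically takes effect once the VF is reinitialized.
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *	if (rte_pmd_i40e_set_vf_mac_addr(0, 0, &addr) != 0)
 *		printf("failed to set MAC address of VF 0\n");
 */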
573 
574 static const struct rte_ether_addr null_mac_addr;
575 
576 int
577 rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
578 	struct rte_ether_addr *mac_addr)
579 {
580 	struct rte_eth_dev *dev;
581 	struct i40e_pf_vf *vf;
582 	struct i40e_vsi *vsi;
583 	struct i40e_pf *pf;
584 	int ret;
585 
586 	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
587 		return -EINVAL;
588 
589 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
590 
591 	dev = &rte_eth_devices[port];
592 
593 	if (!is_i40e_supported(dev))
594 		return -ENOTSUP;
595 
596 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
597 
598 	if (vf_id >= pf->vf_num || !pf->vfs)
599 		return -EINVAL;
600 
601 	vf = &pf->vfs[vf_id];
602 	vsi = vf->vsi;
603 	if (!vsi) {
604 		PMD_DRV_LOG(ERR, "Invalid VSI.");
605 		return -EINVAL;
606 	}
607 
608 	if (rte_is_same_ether_addr(mac_addr, &vf->mac_addr))
609 		/* Reset the mac with NULL address */
610 		rte_ether_addr_copy(&null_mac_addr, &vf->mac_addr);
611 
612 	/* Remove the mac */
613 	ret = i40e_vsi_delete_mac(vsi, mac_addr);
614 	if (ret != I40E_SUCCESS)
615 		return ret;
616 	return 0;
617 }
618 
619 /* Set vlan strip on/off for specific VF from host */
620 int
621 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
622 {
623 	struct rte_eth_dev *dev;
624 	struct i40e_pf *pf;
625 	struct i40e_vsi *vsi;
626 	int ret;
627 
628 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
629 
630 	dev = &rte_eth_devices[port];
631 
632 	if (!is_i40e_supported(dev))
633 		return -ENOTSUP;
634 
635 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
636 
637 	if (vf_id >= pf->vf_num || !pf->vfs) {
638 		PMD_DRV_LOG(ERR, "Invalid argument.");
639 		return -EINVAL;
640 	}
641 
642 	vsi = pf->vfs[vf_id].vsi;
643 
644 	if (!vsi)
645 		return -EINVAL;
646 
647 	ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
648 	if (ret != I40E_SUCCESS) {
649 		ret = -ENOTSUP;
650 		PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
651 	}
652 
653 	return ret;
654 }
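
/*
 * Usage sketch (illustrative): turn on VLAN stripping for all the RX
 * queues of VF 1 on port 0.
 *
 *	if (rte_pmd_i40e_set_vf_vlan_stripq(0, 1, 1) != 0)
 *		printf("failed to enable VLAN stripping on VF 1\n");
 */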
655 
656 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
657 				    uint16_t vlan_id)
658 {
659 	struct rte_eth_dev *dev;
660 	struct i40e_pf *pf;
661 	struct i40e_hw *hw;
662 	struct i40e_vsi *vsi;
663 	struct i40e_vsi_context ctxt;
664 	int ret;
665 
666 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
667 
668 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID) {
669 		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
670 		return -EINVAL;
671 	}
672 
673 	dev = &rte_eth_devices[port];
674 
675 	if (!is_i40e_supported(dev))
676 		return -ENOTSUP;
677 
678 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
679 	hw = I40E_PF_TO_HW(pf);
680 
681 	/**
682 	 * return -ENODEV if SRIOV not enabled, VF number not configured
683 	 * or no queue assigned.
684 	 */
685 	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
686 	    pf->vf_nb_qps == 0)
687 		return -ENODEV;
688 
689 	if (vf_id >= pf->vf_num || !pf->vfs) {
690 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
691 		return -EINVAL;
692 	}
693 
694 	vsi = pf->vfs[vf_id].vsi;
695 	if (!vsi) {
696 		PMD_DRV_LOG(ERR, "Invalid VSI.");
697 		return -EINVAL;
698 	}
699 
700 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
701 	vsi->info.pvid = vlan_id;
702 	if (vlan_id > 0)
703 		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
704 	else
705 		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
706 
707 	memset(&ctxt, 0, sizeof(ctxt));
708 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
709 	ctxt.seid = vsi->seid;
710 
711 	hw = I40E_VSI_TO_HW(vsi);
712 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
713 	if (ret != I40E_SUCCESS) {
714 		ret = -ENOTSUP;
715 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
716 	}
717 
718 	return ret;
719 }
720 
721 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
722 				  uint8_t on)
723 {
724 	struct rte_eth_dev *dev;
725 	struct i40e_pf *pf;
726 	struct i40e_vsi *vsi;
727 	struct i40e_hw *hw;
728 	struct i40e_mac_filter_info filter;
729 	struct rte_ether_addr broadcast = {
730 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
731 	int ret;
732 
733 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
734 
735 	if (on > 1) {
736 		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
737 		return -EINVAL;
738 	}
739 
740 	dev = &rte_eth_devices[port];
741 
742 	if (!is_i40e_supported(dev))
743 		return -ENOTSUP;
744 
745 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
746 	hw = I40E_PF_TO_HW(pf);
747 
748 	if (vf_id >= pf->vf_num || !pf->vfs) {
749 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
750 		return -EINVAL;
751 	}
752 
753 	/**
754 	 * return -ENODEV if SRIOV not enabled, VF number not configured
755 	 * or no queue assigned.
756 	 */
757 	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
758 	    pf->vf_nb_qps == 0) {
759 		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
760 		return -ENODEV;
761 	}
762 
763 	vsi = pf->vfs[vf_id].vsi;
764 	if (!vsi) {
765 		PMD_DRV_LOG(ERR, "Invalid VSI.");
766 		return -EINVAL;
767 	}
768 
769 	if (on) {
770 		rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
771 		filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
772 		ret = i40e_vsi_add_mac(vsi, &filter);
773 	} else {
774 		ret = i40e_vsi_delete_mac(vsi, &broadcast);
775 	}
776 
777 	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
778 		ret = -ENOTSUP;
779 		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
780 	} else {
781 		ret = 0;
782 	}
783 
784 	return ret;
785 }
786 
787 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
788 {
789 	struct rte_eth_dev *dev;
790 	struct i40e_pf *pf;
791 	struct i40e_hw *hw;
792 	struct i40e_vsi *vsi;
793 	struct i40e_vsi_context ctxt;
794 	int ret;
795 
796 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
797 
798 	if (on > 1) {
799 		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
800 		return -EINVAL;
801 	}
802 
803 	dev = &rte_eth_devices[port];
804 
805 	if (!is_i40e_supported(dev))
806 		return -ENOTSUP;
807 
808 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
809 	hw = I40E_PF_TO_HW(pf);
810 
811 	/**
812 	 * return -ENODEV if SRIOV not enabled, VF number not configured
813 	 * or no queue assigned.
814 	 */
815 	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
816 	    pf->vf_nb_qps == 0) {
817 		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
818 		return -ENODEV;
819 	}
820 
821 	if (vf_id >= pf->vf_num || !pf->vfs) {
822 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
823 		return -EINVAL;
824 	}
825 
826 	vsi = pf->vfs[vf_id].vsi;
827 	if (!vsi) {
828 		PMD_DRV_LOG(ERR, "Invalid VSI.");
829 		return -EINVAL;
830 	}
831 
832 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
833 	if (on) {
834 		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
835 		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
836 	} else {
837 		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
838 		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
839 	}
840 
841 	memset(&ctxt, 0, sizeof(ctxt));
842 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
843 	ctxt.seid = vsi->seid;
844 
845 	hw = I40E_VSI_TO_HW(vsi);
846 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
847 	if (ret != I40E_SUCCESS) {
848 		ret = -ENOTSUP;
849 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
850 	}
851 
852 	return ret;
853 }
854 
855 static int
856 i40e_vlan_filter_count(struct i40e_vsi *vsi)
857 {
858 	uint32_t j, k;
859 	uint16_t vlan_id;
860 	int count = 0;
861 
862 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
863 		if (!vsi->vfta[j])
864 			continue;
865 
866 		for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
867 			if (!(vsi->vfta[j] & (1 << k)))
868 				continue;
869 
870 			vlan_id = j * I40E_UINT32_BIT_SIZE + k;
871 			if (!vlan_id)
872 				continue;
873 
874 			count++;
875 		}
876 	}
877 
878 	return count;
879 }
880 
881 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
882 				    uint64_t vf_mask, uint8_t on)
883 {
884 	struct rte_eth_dev *dev;
885 	struct i40e_pf *pf;
886 	struct i40e_hw *hw;
887 	struct i40e_vsi *vsi;
888 	uint16_t vf_idx;
889 	int ret = I40E_SUCCESS;
890 
891 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
892 
893 	dev = &rte_eth_devices[port];
894 
895 	if (!is_i40e_supported(dev))
896 		return -ENOTSUP;
897 
898 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) {
899 		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
900 		return -EINVAL;
901 	}
902 
903 	if (vf_mask == 0) {
904 		PMD_DRV_LOG(ERR, "No VF.");
905 		return -EINVAL;
906 	}
907 
908 	if (on > 1) {
909 		PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
910 		return -EINVAL;
911 	}
912 
913 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
914 	hw = I40E_PF_TO_HW(pf);
915 
916 	/**
917 	 * return -ENODEV if SRIOV not enabled, VF number not configured
918 	 * or no queue assigned.
919 	 */
920 	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
921 	    pf->vf_nb_qps == 0) {
922 		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
923 		return -ENODEV;
924 	}
925 
926 	for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
927 		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
928 			vsi = pf->vfs[vf_idx].vsi;
929 			if (on) {
930 				if (!vsi->vlan_filter_on) {
931 					vsi->vlan_filter_on = true;
932 					i40e_aq_set_vsi_vlan_promisc(hw,
933 								     vsi->seid,
934 								     false,
935 								     NULL);
936 					if (!vsi->vlan_anti_spoof_on)
937 						i40e_add_rm_all_vlan_filter(
938 							vsi, true);
939 				}
940 				ret = i40e_vsi_add_vlan(vsi, vlan_id);
941 			} else {
942 				ret = i40e_vsi_delete_vlan(vsi, vlan_id);
943 
944 				if (!i40e_vlan_filter_count(vsi)) {
945 					vsi->vlan_filter_on = false;
946 					i40e_aq_set_vsi_vlan_promisc(hw,
947 								     vsi->seid,
948 								     true,
949 								     NULL);
950 				}
951 			}
952 		}
953 	}
954 
955 	if (ret != I40E_SUCCESS) {
956 		ret = -ENOTSUP;
957 		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
958 	}
959 
960 	return ret;
961 }
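
/*
 * Usage sketch (illustrative): add VLAN 100 to the filter of VFs 0 and 1
 * on port 0. The vf_mask argument is a bitmap with one bit per VF.
 *
 *	uint64_t vf_mask = (1ULL << 0) | (1ULL << 1);
 *
 *	if (rte_pmd_i40e_set_vf_vlan_filter(0, 100, vf_mask, 1) != 0)
 *		printf("failed to add VLAN 100 for VFs 0 and 1\n");
 */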
962 
963 int
964 rte_pmd_i40e_get_vf_stats(uint16_t port,
965 			  uint16_t vf_id,
966 			  struct rte_eth_stats *stats)
967 {
968 	struct rte_eth_dev *dev;
969 	struct i40e_pf *pf;
970 	struct i40e_vsi *vsi;
971 
972 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
973 
974 	dev = &rte_eth_devices[port];
975 
976 	if (!is_i40e_supported(dev))
977 		return -ENOTSUP;
978 
979 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
980 
981 	if (vf_id >= pf->vf_num || !pf->vfs) {
982 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
983 		return -EINVAL;
984 	}
985 
986 	vsi = pf->vfs[vf_id].vsi;
987 	if (!vsi) {
988 		PMD_DRV_LOG(ERR, "Invalid VSI.");
989 		return -EINVAL;
990 	}
991 
992 	i40e_update_vsi_stats(vsi);
993 
994 	stats->ipackets = vsi->eth_stats.rx_unicast +
995 			vsi->eth_stats.rx_multicast +
996 			vsi->eth_stats.rx_broadcast;
997 	stats->opackets = vsi->eth_stats.tx_unicast +
998 			vsi->eth_stats.tx_multicast +
999 			vsi->eth_stats.tx_broadcast;
1000 	stats->ibytes   = vsi->eth_stats.rx_bytes;
1001 	stats->obytes   = vsi->eth_stats.tx_bytes;
1002 	stats->ierrors  = vsi->eth_stats.rx_discards;
1003 	stats->oerrors  = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
1004 
1005 	return 0;
1006 }
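
/*
 * Usage sketch (illustrative): read and print the counters of VF 0 on
 * port 0. The statistics accumulate until rte_pmd_i40e_reset_vf_stats()
 * is called for that VF.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_pmd_i40e_get_vf_stats(0, 0, &stats) == 0)
 *		printf("VF 0: %"PRIu64" packets in, %"PRIu64" packets out\n",
 *		       stats.ipackets, stats.opackets);
 */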
1007 
1008 int
1009 rte_pmd_i40e_reset_vf_stats(uint16_t port,
1010 			    uint16_t vf_id)
1011 {
1012 	struct rte_eth_dev *dev;
1013 	struct i40e_pf *pf;
1014 	struct i40e_vsi *vsi;
1015 
1016 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1017 
1018 	dev = &rte_eth_devices[port];
1019 
1020 	if (!is_i40e_supported(dev))
1021 		return -ENOTSUP;
1022 
1023 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1024 
1025 	if (vf_id >= pf->vf_num || !pf->vfs) {
1026 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
1027 		return -EINVAL;
1028 	}
1029 
1030 	vsi = pf->vfs[vf_id].vsi;
1031 	if (!vsi) {
1032 		PMD_DRV_LOG(ERR, "Invalid VSI.");
1033 		return -EINVAL;
1034 	}
1035 
1036 	vsi->offset_loaded = false;
1037 	i40e_update_vsi_stats(vsi);
1038 
1039 	return 0;
1040 }
1041 
1042 int
1043 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1044 {
1045 	struct rte_eth_dev *dev;
1046 	struct i40e_pf *pf;
1047 	struct i40e_vsi *vsi;
1048 	struct i40e_hw *hw;
1049 	int ret = 0;
1050 	int i;
1051 
1052 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1053 
1054 	dev = &rte_eth_devices[port];
1055 
1056 	if (!is_i40e_supported(dev))
1057 		return -ENOTSUP;
1058 
1059 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1060 
1061 	if (vf_id >= pf->vf_num || !pf->vfs) {
1062 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
1063 		return -EINVAL;
1064 	}
1065 
1066 	vsi = pf->vfs[vf_id].vsi;
1067 	if (!vsi) {
1068 		PMD_DRV_LOG(ERR, "Invalid VSI.");
1069 		return -EINVAL;
1070 	}
1071 
1072 	if (bw > I40E_QOS_BW_MAX) {
1073 		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1074 			    I40E_QOS_BW_MAX);
1075 		return -EINVAL;
1076 	}
1077 
1078 	if (bw % I40E_QOS_BW_GRANULARITY) {
1079 		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1080 			    I40E_QOS_BW_GRANULARITY);
1081 		return -EINVAL;
1082 	}
1083 
1084 	bw /= I40E_QOS_BW_GRANULARITY;
1085 
1086 	hw = I40E_VSI_TO_HW(vsi);
1087 
1088 	/* No change. */
1089 	if (bw == vsi->bw_info.bw_limit) {
1090 		PMD_DRV_LOG(INFO,
1091 			    "No change for VF max bandwidth. Nothing to do.");
1092 		return 0;
1093 	}
1094 
1095 	/**
1096 	 * VF bandwidth limitation and TC bandwidth limitation cannot be
1097 	 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1098 	 *
1099 	 * If bw is 0, means disable bandwidth limitation. Then no need to
1100 	 * check TC bandwidth limitation.
1101 	 */
1102 	if (bw) {
1103 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1104 			if ((vsi->enabled_tc & BIT_ULL(i)) &&
1105 			    vsi->bw_info.bw_ets_credits[i])
1106 				break;
1107 		}
1108 		if (i != I40E_MAX_TRAFFIC_CLASS) {
1109 			PMD_DRV_LOG(ERR,
1110 				    "TC max bandwidth has been set on this VF,"
1111 				    " please disable it first.");
1112 			return -EINVAL;
1113 		}
1114 	}
1115 
1116 	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1117 	if (ret) {
1118 		PMD_DRV_LOG(ERR,
1119 			    "Failed to set VF %d bandwidth, err(%d).",
1120 			    vf_id, ret);
1121 		return -EINVAL;
1122 	}
1123 
1124 	/* Store the configuration. */
1125 	vsi->bw_info.bw_limit = (uint16_t)bw;
1126 	vsi->bw_info.bw_max = 0;
1127 
1128 	return 0;
1129 }
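
/*
 * Usage sketch (illustrative): cap VF 0 of port 0 at 2000 Mbps. The value
 * must be a multiple of I40E_QOS_BW_GRANULARITY and no larger than
 * I40E_QOS_BW_MAX; passing 0 removes the limit.
 *
 *	if (rte_pmd_i40e_set_vf_max_bw(0, 0, 2000) != 0)
 *		printf("failed to limit VF 0 to 2000 Mbps\n");
 */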
1130 
1131 int
1132 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1133 				uint8_t tc_num, uint8_t *bw_weight)
1134 {
1135 	struct rte_eth_dev *dev;
1136 	struct i40e_pf *pf;
1137 	struct i40e_vsi *vsi;
1138 	struct i40e_hw *hw;
1139 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1140 	int ret = 0;
1141 	int i, j;
1142 	uint16_t sum;
1143 	bool b_change = false;
1144 
1145 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1146 
1147 	dev = &rte_eth_devices[port];
1148 
1149 	if (!is_i40e_supported(dev))
1150 		return -ENOTSUP;
1151 
1152 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1153 
1154 	if (vf_id >= pf->vf_num || !pf->vfs) {
1155 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
1156 		return -EINVAL;
1157 	}
1158 
1159 	vsi = pf->vfs[vf_id].vsi;
1160 	if (!vsi) {
1161 		PMD_DRV_LOG(ERR, "Invalid VSI.");
1162 		return -EINVAL;
1163 	}
1164 
1165 	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1166 		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1167 			    I40E_MAX_TRAFFIC_CLASS);
1168 		return -EINVAL;
1169 	}
1170 
1171 	sum = 0;
1172 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1173 		if (vsi->enabled_tc & BIT_ULL(i))
1174 			sum++;
1175 	}
1176 	if (sum != tc_num) {
1177 		PMD_DRV_LOG(ERR,
1178 			    "Weight should be set for all %d enabled TCs.",
1179 			    sum);
1180 		return -EINVAL;
1181 	}
1182 
1183 	sum = 0;
1184 	for (i = 0; i < tc_num; i++) {
1185 		if (!bw_weight[i]) {
1186 			PMD_DRV_LOG(ERR,
1187 				    "The weight should be 1 at least.");
1188 			return -EINVAL;
1189 		}
1190 		sum += bw_weight[i];
1191 	}
1192 	if (sum != 100) {
1193 		PMD_DRV_LOG(ERR,
1194 			    "The summary of the TC weight should be 100.");
1195 		return -EINVAL;
1196 	}
1197 
1198 	/**
1199 	 * Create the configuration for all the TCs.
1200 	 */
1201 	memset(&tc_bw, 0, sizeof(tc_bw));
1202 	tc_bw.tc_valid_bits = vsi->enabled_tc;
1203 	j = 0;
1204 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1205 		if (vsi->enabled_tc & BIT_ULL(i)) {
1206 			if (bw_weight[j] !=
1207 				vsi->bw_info.bw_ets_share_credits[i])
1208 				b_change = true;
1209 
1210 			tc_bw.tc_bw_credits[i] = bw_weight[j];
1211 			j++;
1212 		}
1213 	}
1214 
1215 	/* No change. */
1216 	if (!b_change) {
1217 		PMD_DRV_LOG(INFO,
1218 			    "No change for TC allocated bandwidth."
1219 			    " Nothing to do.");
1220 		return 0;
1221 	}
1222 
1223 	hw = I40E_VSI_TO_HW(vsi);
1224 
1225 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1226 	if (ret) {
1227 		PMD_DRV_LOG(ERR,
1228 			    "Failed to set VF %d TC bandwidth weight, err(%d).",
1229 			    vf_id, ret);
1230 		return -EINVAL;
1231 	}
1232 
1233 	/* Store the configuration. */
1234 	j = 0;
1235 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1236 		if (vsi->enabled_tc & BIT_ULL(i)) {
1237 			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
1238 			j++;
1239 		}
1240 	}
1241 
1242 	return 0;
1243 }
1244 
1245 int
1246 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1247 			      uint8_t tc_no, uint32_t bw)
1248 {
1249 	struct rte_eth_dev *dev;
1250 	struct i40e_pf *pf;
1251 	struct i40e_vsi *vsi;
1252 	struct i40e_hw *hw;
1253 	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1254 	int ret = 0;
1255 	int i;
1256 
1257 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1258 
1259 	dev = &rte_eth_devices[port];
1260 
1261 	if (!is_i40e_supported(dev))
1262 		return -ENOTSUP;
1263 
1264 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1265 
1266 	if (vf_id >= pf->vf_num || !pf->vfs) {
1267 		PMD_DRV_LOG(ERR, "Invalid VF ID.");
1268 		return -EINVAL;
1269 	}
1270 
1271 	vsi = pf->vfs[vf_id].vsi;
1272 	if (!vsi) {
1273 		PMD_DRV_LOG(ERR, "Invalid VSI.");
1274 		return -EINVAL;
1275 	}
1276 
1277 	if (bw > I40E_QOS_BW_MAX) {
1278 		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1279 			    I40E_QOS_BW_MAX);
1280 		return -EINVAL;
1281 	}
1282 
1283 	if (bw % I40E_QOS_BW_GRANULARITY) {
1284 		PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1285 			    I40E_QOS_BW_GRANULARITY);
1286 		return -EINVAL;
1287 	}
1288 
1289 	bw /= I40E_QOS_BW_GRANULARITY;
1290 
1291 	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1292 		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1293 			    I40E_MAX_TRAFFIC_CLASS);
1294 		return -EINVAL;
1295 	}
1296 
1297 	hw = I40E_VSI_TO_HW(vsi);
1298 
1299 	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1300 		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1301 			    vf_id, tc_no);
1302 		return -EINVAL;
1303 	}
1304 
1305 	/* No change. */
1306 	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1307 		PMD_DRV_LOG(INFO,
1308 			    "No change for TC max bandwidth. Nothing to do.");
1309 		return 0;
1310 	}
1311 
1312 	/**
1313 	 * VF bandwidth limitation and TC bandwidth limitation cannot be
1314 	 * enabled in parallel, disable VF bandwidth limitation if it's
1315 	 * enabled.
1316 	 * If bw is 0, means disable bandwidth limitation. Then no need to
1317 	 * care about VF bandwidth limitation configuration.
1318 	 */
1319 	if (bw && vsi->bw_info.bw_limit) {
1320 		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1321 		if (ret) {
1322 			PMD_DRV_LOG(ERR,
1323 				    "Failed to disable VF(%d)"
1324 				    " bandwidth limitation, err(%d).",
1325 				    vf_id, ret);
1326 			return -EINVAL;
1327 		}
1328 
1329 		PMD_DRV_LOG(INFO,
1330 			    "VF max bandwidth is disabled according"
1331 			    " to TC max bandwidth setting.");
1332 	}
1333 
1334 	/**
1335 	 * Get all the TCs' info to create a whole picture.
1336 	 * Because the incremental change isn't permitted.
1337 	 */
1338 	memset(&tc_bw, 0, sizeof(tc_bw));
1339 	tc_bw.tc_valid_bits = vsi->enabled_tc;
1340 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1341 		if (vsi->enabled_tc & BIT_ULL(i)) {
1342 			tc_bw.tc_bw_credits[i] =
1343 				rte_cpu_to_le_16(
1344 					vsi->bw_info.bw_ets_credits[i]);
1345 		}
1346 	}
1347 	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1348 
1349 	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1350 	if (ret) {
1351 		PMD_DRV_LOG(ERR,
1352 			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
1353 			    vf_id, tc_no, ret);
1354 		return -EINVAL;
1355 	}
1356 
1357 	/* Store the configuration. */
1358 	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
1359 
1360 	return 0;
1361 }
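
/*
 * Usage sketch (illustrative): limit TC 0 of VF 0 on port 0 to 1000 Mbps.
 * As the code above enforces, a per-VF bandwidth limit and a per-TC limit
 * cannot be active at the same time; an existing VF limit is disabled
 * automatically before the TC limit is applied.
 *
 *	if (rte_pmd_i40e_set_vf_tc_max_bw(0, 0, 0, 1000) != 0)
 *		printf("failed to set TC 0 max bandwidth for VF 0\n");
 */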
1362 
1363 int
1364 rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
1365 {
1366 	struct rte_eth_dev *dev;
1367 	struct i40e_pf *pf;
1368 	struct i40e_vsi *vsi;
1369 	struct i40e_veb *veb;
1370 	struct i40e_hw *hw;
1371 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1372 	int i;
1373 	int ret;
1374 
1375 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1376 
1377 	dev = &rte_eth_devices[port];
1378 
1379 	if (!is_i40e_supported(dev))
1380 		return -ENOTSUP;
1381 
1382 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1383 
1384 	vsi = pf->main_vsi;
1385 	if (!vsi) {
1386 		PMD_DRV_LOG(ERR, "Invalid VSI.");
1387 		return -EINVAL;
1388 	}
1389 
1390 	veb = vsi->veb;
1391 	if (!veb) {
1392 		PMD_DRV_LOG(ERR, "Invalid VEB.");
1393 		return -EINVAL;
1394 	}
1395 
1396 	if ((tc_map & veb->enabled_tc) != tc_map) {
1397 		PMD_DRV_LOG(ERR,
1398 			    "TC bitmap isn't the subset of enabled TCs 0x%x.",
1399 			    veb->enabled_tc);
1400 		return -EINVAL;
1401 	}
1402 
1403 	if (tc_map == veb->strict_prio_tc) {
1404 		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1405 		return 0;
1406 	}
1407 
1408 	hw = I40E_VSI_TO_HW(vsi);
1409 
1410 	/* Disable DCBx if it's the first time to set strict priority. */
1411 	if (!veb->strict_prio_tc) {
1412 		ret = i40e_aq_stop_lldp(hw, true, true, NULL);
1413 		if (ret)
1414 			PMD_DRV_LOG(INFO,
1415 				    "Failed to disable DCBx as it's already"
1416 				    " disabled.");
1417 		else
1418 			PMD_DRV_LOG(INFO,
1419 				    "DCBx is disabled according to strict"
1420 				    " priority setting.");
1421 	}
1422 
1423 	memset(&ets_data, 0, sizeof(ets_data));
1424 	ets_data.tc_valid_bits = veb->enabled_tc;
1425 	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1426 	ets_data.tc_strict_priority_flags = tc_map;
1427 	/* Get all TCs' bandwidth. */
1428 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1429 		if (veb->enabled_tc & BIT_ULL(i)) {
1430 			/* For robustness, if bandwidth is 0, use 1 instead. */
1431 			if (veb->bw_info.bw_ets_share_credits[i])
1432 				ets_data.tc_bw_share_credits[i] =
1433 					veb->bw_info.bw_ets_share_credits[i];
1434 			else
1435 				ets_data.tc_bw_share_credits[i] =
1436 					I40E_QOS_BW_WEIGHT_MIN;
1437 		}
1438 	}
1439 
1440 	if (!veb->strict_prio_tc)
1441 		ret = i40e_aq_config_switch_comp_ets(
1442 			hw, veb->uplink_seid,
1443 			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1444 			NULL);
1445 	else if (tc_map)
1446 		ret = i40e_aq_config_switch_comp_ets(
1447 			hw, veb->uplink_seid,
1448 			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1449 			NULL);
1450 	else
1451 		ret = i40e_aq_config_switch_comp_ets(
1452 			hw, veb->uplink_seid,
1453 			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1454 			NULL);
1455 
1456 	if (ret) {
1457 		PMD_DRV_LOG(ERR,
1458 			    "Failed to set TCs' strict priority mode."
1459 			    " err (%d)", ret);
1460 		return -EINVAL;
1461 	}
1462 
1463 	veb->strict_prio_tc = tc_map;
1464 
1465 	/* Enable DCBx again, if all the TCs' strict priority disabled. */
1466 	if (!tc_map) {
1467 		ret = i40e_aq_start_lldp(hw, true, NULL);
1468 		if (ret) {
1469 			PMD_DRV_LOG(ERR,
1470 				    "Failed to enable DCBx, err(%d).", ret);
1471 			return -EINVAL;
1472 		}
1473 
1474 		PMD_DRV_LOG(INFO,
1475 			    "DCBx is enabled again according to strict"
1476 			    " priority setting.");
1477 	}
1478 
1479 	return ret;
1480 }
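
/*
 * Usage sketch (illustrative): put TC 0 and TC 1 of port 0 into strict
 * priority mode. The bitmap must be a subset of the enabled TCs; passing
 * 0 disables strict priority and re-enables DCBx.
 *
 *	if (rte_pmd_i40e_set_tc_strict_prio(0, 0x3) != 0)
 *		printf("failed to set strict priority TCs on port 0\n");
 */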
1481 
1482 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1483 #define I40E_MAX_PROFILE_NUM 16
1484 
1485 static void
1486 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1487 			       uint32_t track_id, uint8_t *profile_info_sec,
1488 			       bool add)
1489 {
1490 	struct i40e_profile_section_header *sec = NULL;
1491 	struct i40e_profile_info *pinfo;
1492 
1493 	sec = (struct i40e_profile_section_header *)profile_info_sec;
1494 	sec->tbl_size = 1;
1495 	sec->data_end = sizeof(struct i40e_profile_section_header) +
1496 		sizeof(struct i40e_profile_info);
1497 	sec->section.type = SECTION_TYPE_INFO;
1498 	sec->section.offset = sizeof(struct i40e_profile_section_header);
1499 	sec->section.size = sizeof(struct i40e_profile_info);
1500 	pinfo = (struct i40e_profile_info *)(profile_info_sec +
1501 					     sec->section.offset);
1502 	pinfo->track_id = track_id;
1503 	memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1504 	memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1505 	if (add)
1506 		pinfo->op = I40E_DDP_ADD_TRACKID;
1507 	else
1508 		pinfo->op = I40E_DDP_REMOVE_TRACKID;
1509 }
1510 
1511 static enum i40e_status_code
1512 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1513 {
1514 	enum i40e_status_code status = I40E_SUCCESS;
1515 	struct i40e_profile_section_header *sec;
1516 	uint32_t track_id;
1517 	uint32_t offset = 0;
1518 	uint32_t info = 0;
1519 
1520 	sec = (struct i40e_profile_section_header *)profile_info_sec;
1521 	track_id = ((struct i40e_profile_info *)(profile_info_sec +
1522 					 sec->section.offset))->track_id;
1523 
1524 	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1525 				   track_id, &offset, &info, NULL);
1526 	if (status)
1527 		PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1528 			    "offset %d, info %d",
1529 			    offset, info);
1530 
1531 	return status;
1532 }
1533 
1534 /* Check if the profile info exists */
1535 static int
1536 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1537 {
1538 	struct rte_eth_dev *dev = &rte_eth_devices[port];
1539 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1540 	uint8_t *buff;
1541 	struct rte_pmd_i40e_profile_list *p_list;
1542 	struct rte_pmd_i40e_profile_info *pinfo, *p;
1543 	uint32_t i;
1544 	int ret;
1545 	static const uint32_t group_mask = 0x00ff0000;
1546 
1547 	pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1548 			     sizeof(struct i40e_profile_section_header));
1549 	if (pinfo->track_id == 0) {
1550 		PMD_DRV_LOG(INFO, "Read-only profile.");
1551 		return 0;
1552 	}
1553 	buff = rte_zmalloc("pinfo_list",
1554 			   (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1555 			   0);
1556 	if (!buff) {
1557 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1558 		return -1;
1559 	}
1560 
1561 	ret = i40e_aq_get_ddp_list(
1562 		hw, (void *)buff,
1563 		(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1564 		0, NULL);
1565 	if (ret) {
1566 		PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1567 		rte_free(buff);
1568 		return -1;
1569 	}
1570 	p_list = (struct rte_pmd_i40e_profile_list *)buff;
1571 	for (i = 0; i < p_list->p_count; i++) {
1572 		p = &p_list->p_info[i];
1573 		if (pinfo->track_id == p->track_id) {
1574 			PMD_DRV_LOG(INFO, "Profile exists.");
1575 			rte_free(buff);
1576 			return 1;
1577 		}
1578 	}
1579 	/* profile with group id 0xff is compatible with any other profile */
1580 	if ((pinfo->track_id & group_mask) == group_mask) {
1581 		rte_free(buff);
1582 		return 0;
1583 	}
1584 	for (i = 0; i < p_list->p_count; i++) {
1585 		p = &p_list->p_info[i];
1586 		if ((p->track_id & group_mask) == 0) {
1587 			PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
1588 			rte_free(buff);
1589 			return 2;
1590 		}
1591 	}
1592 	for (i = 0; i < p_list->p_count; i++) {
1593 		p = &p_list->p_info[i];
1594 		if ((p->track_id & group_mask) == group_mask)
1595 			continue;
1596 		if ((pinfo->track_id & group_mask) !=
1597 		    (p->track_id & group_mask)) {
1598 			PMD_DRV_LOG(INFO, "Profile of different group exists.");
1599 			rte_free(buff);
1600 			return 3;
1601 		}
1602 	}
1603 
1604 	rte_free(buff);
1605 	return 0;
1606 }
1607 
1608 int
1609 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1610 				 uint32_t size,
1611 				 enum rte_pmd_i40e_package_op op)
1612 {
1613 	struct rte_eth_dev *dev;
1614 	struct i40e_hw *hw;
1615 	struct i40e_package_header *pkg_hdr;
1616 	struct i40e_generic_seg_header *profile_seg_hdr;
1617 	struct i40e_generic_seg_header *metadata_seg_hdr;
1618 	uint32_t track_id;
1619 	uint8_t *profile_info_sec;
1620 	int is_exist;
1621 	enum i40e_status_code status = I40E_SUCCESS;
1622 	static const uint32_t type_mask = 0xff000000;
1623 
1624 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1625 		op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1626 		op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1627 		PMD_DRV_LOG(ERR, "Operation not supported.");
1628 		return -ENOTSUP;
1629 	}
1630 
1631 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1632 
1633 	dev = &rte_eth_devices[port];
1634 
1635 	if (!is_i40e_supported(dev))
1636 		return -ENOTSUP;
1637 
1638 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639 
1640 	if (size < (sizeof(struct i40e_package_header) +
1641 		    sizeof(struct i40e_metadata_segment) +
1642 		    sizeof(uint32_t) * 2)) {
1643 		PMD_DRV_LOG(ERR, "Buff is invalid.");
1644 		return -EINVAL;
1645 	}
1646 
1647 	pkg_hdr = (struct i40e_package_header *)buff;
1648 
1649 	if (!pkg_hdr) {
1650 		PMD_DRV_LOG(ERR, "Failed to fill the package structure");
1651 		return -EINVAL;
1652 	}
1653 
1654 	if (pkg_hdr->segment_count < 2) {
1655 		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1656 		return -EINVAL;
1657 	}
1658 
1659 	/* Find metadata segment */
1660 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1661 							pkg_hdr);
1662 	if (!metadata_seg_hdr) {
1663 		PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1664 		return -EINVAL;
1665 	}
1666 	track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1667 	if (track_id == I40E_DDP_TRACKID_INVALID) {
1668 		PMD_DRV_LOG(ERR, "Invalid track_id");
1669 		return -EINVAL;
1670 	}
1671 
1672 	/* force read-only track_id for type 0 */
1673 	if ((track_id & type_mask) == 0)
1674 		track_id = 0;
1675 
1676 	/* Find profile segment */
1677 	profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1678 						       pkg_hdr);
1679 	if (!profile_seg_hdr) {
1680 		PMD_DRV_LOG(ERR, "Failed to find profile segment header");
1681 		return -EINVAL;
1682 	}
1683 
1684 	profile_info_sec = rte_zmalloc(
1685 		"i40e_profile_info",
1686 		sizeof(struct i40e_profile_section_header) +
1687 		sizeof(struct i40e_profile_info),
1688 		0);
1689 	if (!profile_info_sec) {
1690 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
1691 		return -EINVAL;
1692 	}
1693 
1694 	/* Check if the profile already loaded */
1695 	i40e_generate_profile_info_sec(
1696 		((struct i40e_profile_segment *)profile_seg_hdr)->name,
1697 		&((struct i40e_profile_segment *)profile_seg_hdr)->version,
1698 		track_id, profile_info_sec,
1699 		op == RTE_PMD_I40E_PKG_OP_WR_ADD);
1700 	is_exist = i40e_check_profile_info(port, profile_info_sec);
1701 	if (is_exist < 0) {
1702 		PMD_DRV_LOG(ERR, "Failed to check profile.");
1703 		rte_free(profile_info_sec);
1704 		return -EINVAL;
1705 	}
1706 
1707 	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1708 		if (is_exist) {
1709 			if (is_exist == 1)
1710 				PMD_DRV_LOG(ERR, "Profile already exists.");
1711 			else if (is_exist == 2)
1712 				PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
1713 			else if (is_exist == 3)
1714 				PMD_DRV_LOG(ERR, "Profile of different group already exists");
1715 			i40e_update_customized_info(dev, buff, size, op);
1716 			rte_free(profile_info_sec);
1717 			return -EEXIST;
1718 		}
1719 	} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1720 		if (is_exist != 1) {
1721 			PMD_DRV_LOG(ERR, "Profile does not exist.");
1722 			rte_free(profile_info_sec);
1723 			return -EACCES;
1724 		}
1725 	}
1726 
1727 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1728 		status = i40e_rollback_profile(
1729 			hw,
1730 			(struct i40e_profile_segment *)profile_seg_hdr,
1731 			track_id);
1732 		if (status) {
1733 			PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
1734 			rte_free(profile_info_sec);
1735 			return status;
1736 		}
1737 	} else {
1738 		status = i40e_write_profile(
1739 			hw,
1740 			(struct i40e_profile_segment *)profile_seg_hdr,
1741 			track_id);
1742 		if (status) {
1743 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1744 				PMD_DRV_LOG(ERR, "Failed to write profile for add.");
1745 			else
1746 				PMD_DRV_LOG(ERR, "Failed to write profile.");
1747 			rte_free(profile_info_sec);
1748 			return status;
1749 		}
1750 	}
1751 
1752 	if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
1753 		/* Modify loaded profiles info list */
1754 		status = i40e_add_rm_profile_info(hw, profile_info_sec);
1755 		if (status) {
1756 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1757 				PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
1758 			else
1759 				PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
1760 		}
1761 	}
1762 
1763 	if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
1764 	    op == RTE_PMD_I40E_PKG_OP_WR_DEL)
1765 		i40e_update_customized_info(dev, buff, size, op);
1766 
1767 	rte_free(profile_info_sec);
1768 	return status;
1769 }
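
/*
 * Usage sketch (illustrative): load a DDP profile on port 0, where pkg
 * and pkg_size hold a profile image already read into memory (both are
 * assumptions for the example); only the call itself reflects the API.
 *
 *	int ret = rte_pmd_i40e_process_ddp_package(0, pkg, pkg_size,
 *						   RTE_PMD_I40E_PKG_OP_WR_ADD);
 *	if (ret == -EEXIST)
 *		printf("profile is already loaded\n");
 *	else if (ret != 0)
 *		printf("failed to load profile (%d)\n", ret);
 */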
1770 
1771 /* Get number of TLV records in the section */
1772 static unsigned int
1773 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1774 {
1775 	unsigned int i, nb_rec, nb_tlv = 0;
1776 	struct i40e_profile_tlv_section_record *tlv;
1777 
1778 	if (!sec)
1779 		return nb_tlv;
1780 
1781 	/* get number of records in the section */
1782 	nb_rec = sec->section.size /
1783 				sizeof(struct i40e_profile_tlv_section_record);
1784 	for (i = 0; i < nb_rec; ) {
1785 		tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
1786 		i += tlv->len;
1787 		nb_tlv++;
1788 	}
1789 	return nb_tlv;
1790 }
1791 
1792 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1793 	uint8_t *info_buff, uint32_t info_size,
1794 	enum rte_pmd_i40e_package_info type)
1795 {
1796 	uint32_t ret_size;
1797 	struct i40e_package_header *pkg_hdr;
1798 	struct i40e_generic_seg_header *i40e_seg_hdr;
1799 	struct i40e_generic_seg_header *note_seg_hdr;
1800 	struct i40e_generic_seg_header *metadata_seg_hdr;
1801 
1802 	if (!info_buff) {
1803 		PMD_DRV_LOG(ERR, "Output info buff is invalid.");
1804 		return -EINVAL;
1805 	}
1806 
1807 	if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1808 		sizeof(struct i40e_metadata_segment) +
1809 		sizeof(uint32_t) * 2)) {
1810 		PMD_DRV_LOG(ERR, "Package buff is invalid.");
1811 		return -EINVAL;
1812 	}
1813 
1814 	pkg_hdr = (struct i40e_package_header *)pkg_buff;
1815 	if (pkg_hdr->segment_count < 2) {
1816 		PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1817 		return -EINVAL;
1818 	}
1819 
1820 	/* Find metadata segment */
1821 	metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1822 		pkg_hdr);
1823 
1824 	/* Find global notes segment */
1825 	note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1826 		pkg_hdr);
1827 
1828 	/* Find i40e profile segment */
1829 	i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1830 
1831 	/* get global header info */
1832 	if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1833 		struct rte_pmd_i40e_profile_info *info =
1834 			(struct rte_pmd_i40e_profile_info *)info_buff;
1835 
1836 		if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1837 			PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1838 			return -EINVAL;
1839 		}
1840 
1841 		if (!metadata_seg_hdr) {
1842 			PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1843 			return -EINVAL;
1844 		}
1845 
1846 		memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1847 		info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1848 		info->track_id =
1849 			((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1850 
1851 		memcpy(info->name,
1852 			((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1853 			I40E_DDP_NAME_SIZE);
1854 		memcpy(&info->version,
1855 			&((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1856 			sizeof(struct i40e_ddp_version));
1857 		return I40E_SUCCESS;
1858 	}
1859 
1860 	/* get global note size */
1861 	if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1862 		if (info_size < sizeof(uint32_t)) {
1863 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
1864 			return -EINVAL;
1865 		}
1866 		if (note_seg_hdr == NULL)
1867 			ret_size = 0;
1868 		else
1869 			ret_size = note_seg_hdr->size;
1870 		*(uint32_t *)info_buff = ret_size;
1871 		return I40E_SUCCESS;
1872 	}
1873 
1874 	/* get global note */
1875 	if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1876 		if (note_seg_hdr == NULL)
1877 			return -ENOTSUP;
1878 		if (info_size < note_seg_hdr->size) {
1879 			PMD_DRV_LOG(ERR, "Information buffer size is too small");
1880 			return -EINVAL;
1881 		}
1882 		memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
1883 		return I40E_SUCCESS;
1884 	}
1885 
1886 	/* get i40e segment header info */
1887 	if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1888 		struct rte_pmd_i40e_profile_info *info =
1889 			(struct rte_pmd_i40e_profile_info *)info_buff;
1890 
1891 		if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1892 			PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1893 			return -EINVAL;
1894 		}
1895 
1896 		if (!metadata_seg_hdr) {
1897 			PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1898 			return -EINVAL;
1899 		}
1900 
1901 		if (!i40e_seg_hdr) {
1902 			PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1903 			return -EINVAL;
1904 		}
1905 
1906 		memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1907 		info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1908 		info->track_id =
1909 			((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1910 
1911 		memcpy(info->name,
1912 			((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1913 			I40E_DDP_NAME_SIZE);
1914 		memcpy(&info->version,
1915 			&((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1916 			sizeof(struct i40e_ddp_version));
1917 		return I40E_SUCCESS;
1918 	}
1919 
1920 	/* get number of devices */
1921 	if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1922 		if (info_size < sizeof(uint32_t)) {
1923 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
1924 			return -EINVAL;
1925 		}
1926 		*(uint32_t *)info_buff =
1927 			((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1928 		return I40E_SUCCESS;
1929 	}
1930 
1931 	/* get list of devices */
1932 	if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1933 		uint32_t dev_num;
1934 		dev_num =
1935 			((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1936 		if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1937 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
1938 			return -EINVAL;
1939 		}
1940 		memcpy(info_buff,
1941 			((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1942 			sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1943 		return I40E_SUCCESS;
1944 	}
1945 
1946 	/* get number of protocols */
1947 	if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1948 		struct i40e_profile_section_header *proto;
1949 
1950 		if (info_size < sizeof(uint32_t)) {
1951 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
1952 			return -EINVAL;
1953 		}
1954 		proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1955 				(struct i40e_profile_segment *)i40e_seg_hdr);
1956 		*(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1957 		return I40E_SUCCESS;
1958 	}
1959 
1960 	/* get list of protocols */
1961 	if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1962 		uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1963 		struct rte_pmd_i40e_proto_info *pinfo;
1964 		struct i40e_profile_section_header *proto;
1965 		struct i40e_profile_tlv_section_record *tlv;
1966 
1967 		pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1968 		nb_proto_info = info_size /
1969 					sizeof(struct rte_pmd_i40e_proto_info);
1970 		for (i = 0; i < nb_proto_info; i++) {
1971 			pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1972 			memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1973 		}
1974 		proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1975 				(struct i40e_profile_segment *)i40e_seg_hdr);
1976 		nb_tlv = i40e_get_tlv_section_size(proto);
1977 		if (nb_tlv == 0)
1978 			return I40E_SUCCESS;
1979 		if (nb_proto_info < nb_tlv) {
1980 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
1981 			return -EINVAL;
1982 		}
1983 		/* get number of records in the section */
1984 		nb_rec = proto->section.size /
1985 				sizeof(struct i40e_profile_tlv_section_record);
1986 		tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1987 		for (i = j = 0; i < nb_rec; j++) {
1988 			pinfo[j].proto_id = tlv->data[0];
1989 			strlcpy(pinfo[j].name, (const char *)&tlv->data[1],
1990 				I40E_DDP_NAME_SIZE);
1991 			i += tlv->len;
1992 			tlv = &tlv[tlv->len];
1993 		}
1994 		return I40E_SUCCESS;
1995 	}
1996 
1997 	/* get number of packet classification types */
1998 	if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1999 		struct i40e_profile_section_header *pctype;
2000 
2001 		if (info_size < sizeof(uint32_t)) {
2002 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
2003 			return -EINVAL;
2004 		}
2005 		pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2006 				(struct i40e_profile_segment *)i40e_seg_hdr);
2007 		*(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
2008 		return I40E_SUCCESS;
2009 	}
2010 
2011 	/* get list of packet classification types */
2012 	if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
2013 		uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2014 		struct rte_pmd_i40e_ptype_info *pinfo;
2015 		struct i40e_profile_section_header *pctype;
2016 		struct i40e_profile_tlv_section_record *tlv;
2017 
2018 		pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2019 		nb_proto_info = info_size /
2020 					sizeof(struct rte_pmd_i40e_ptype_info);
2021 		for (i = 0; i < nb_proto_info; i++)
2022 			memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2023 			       sizeof(struct rte_pmd_i40e_ptype_info));
2024 		pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
2025 				(struct i40e_profile_segment *)i40e_seg_hdr);
2026 		nb_tlv = i40e_get_tlv_section_size(pctype);
2027 		if (nb_tlv == 0)
2028 			return I40E_SUCCESS;
2029 		if (nb_proto_info < nb_tlv) {
2030 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
2031 			return -EINVAL;
2032 		}
2033 
2034 		/* get number of records in the section */
2035 		nb_rec = pctype->section.size /
2036 				sizeof(struct i40e_profile_tlv_section_record);
2037 		tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
2038 		for (i = j = 0; i < nb_rec; j++) {
2039 			memcpy(&pinfo[j], tlv->data,
2040 			       sizeof(struct rte_pmd_i40e_ptype_info));
2041 			i += tlv->len;
2042 			tlv = &tlv[tlv->len];
2043 		}
2044 		return I40E_SUCCESS;
2045 	}
2046 
2047 	/* get number of packet types */
2048 	if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
2049 		struct i40e_profile_section_header *ptype;
2050 
2051 		if (info_size < sizeof(uint32_t)) {
2052 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
2053 			return -EINVAL;
2054 		}
2055 		ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2056 				(struct i40e_profile_segment *)i40e_seg_hdr);
2057 		*(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
2058 		return I40E_SUCCESS;
2059 	}
2060 
2061 	/* get list of packet types */
2062 	if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2063 		uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2064 		struct rte_pmd_i40e_ptype_info *pinfo;
2065 		struct i40e_profile_section_header *ptype;
2066 		struct i40e_profile_tlv_section_record *tlv;
2067 
2068 		pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2069 		nb_proto_info = info_size /
2070 					sizeof(struct rte_pmd_i40e_ptype_info);
2071 		for (i = 0; i < nb_proto_info; i++)
2072 			memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2073 			       sizeof(struct rte_pmd_i40e_ptype_info));
2074 		ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2075 				(struct i40e_profile_segment *)i40e_seg_hdr);
2076 		nb_tlv = i40e_get_tlv_section_size(ptype);
2077 		if (nb_tlv == 0)
2078 			return I40E_SUCCESS;
2079 		if (nb_proto_info < nb_tlv) {
2080 			PMD_DRV_LOG(ERR, "Invalid information buffer size");
2081 			return -EINVAL;
2082 		}
2083 		/* get number of records in the section */
2084 		nb_rec = ptype->section.size /
2085 				sizeof(struct i40e_profile_tlv_section_record);
2086 		for (i = j = 0; i < nb_rec; j++) {
2087 			tlv = (struct i40e_profile_tlv_section_record *)
2088 								&ptype[1 + i];
2089 			memcpy(&pinfo[j], tlv->data,
2090 			       sizeof(struct rte_pmd_i40e_ptype_info));
2091 			i += tlv->len;
2092 		}
2093 		return I40E_SUCCESS;
2094 	}
2095 
2096 	PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
2097 	return -EINVAL;
2098 }
2099 
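/*
 * Usage sketch (illustrative only, not part of the driver): read the profile
 * header out of a DDP package image that the application already holds in
 * memory, via the public rte_pmd_i40e_get_ddp_info() entry point from
 * rte_pmd_i40e.h.  Buffer names and the helper name are hypothetical.
 */
static __rte_unused int
example_read_ddp_header(uint8_t *pkg_buff, uint32_t pkg_size)
{
	struct rte_pmd_i40e_profile_info pinfo;
	int ret;

	/* Request the i40e segment header of the package. */
	ret = rte_pmd_i40e_get_ddp_info(pkg_buff, pkg_size,
					(uint8_t *)&pinfo, sizeof(pinfo),
					RTE_PMD_I40E_PKG_INFO_HEADER);
	if (ret)
		return ret;

	/* pinfo.name, pinfo.version and pinfo.track_id now describe the
	 * profile carried by the package.
	 */
	return 0;
}
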
2100 int
2101 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2102 {
2103 	struct rte_eth_dev *dev;
2104 	struct i40e_hw *hw;
2105 	enum i40e_status_code status = I40E_SUCCESS;
2106 
2107 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2108 
2109 	dev = &rte_eth_devices[port];
2110 
2111 	if (!is_i40e_supported(dev))
2112 		return -ENOTSUP;
2113 
2114 	if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2115 		return -EINVAL;
2116 
2117 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2118 
2119 	status = i40e_aq_get_ddp_list(hw, (void *)buff,
2120 				      size, 0, NULL);
2121 
2122 	return status;
2123 }
2124 
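/*
 * Usage sketch (illustrative only): list the DDP profiles currently loaded on
 * a port.  The buffer size mirrors the minimum checked above; the loop body
 * assumes the rte_pmd_i40e_profile_list layout (p_count followed by p_info[])
 * declared in rte_pmd_i40e.h.
 */
static __rte_unused int
example_list_ddp_profiles(uint16_t port)
{
	uint8_t buff[I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4];
	struct rte_pmd_i40e_profile_list *list =
		(struct rte_pmd_i40e_profile_list *)buff;
	uint32_t i;
	int ret;

	ret = rte_pmd_i40e_get_ddp_list(port, buff, sizeof(buff));
	if (ret)
		return ret;

	for (i = 0; i < list->p_count; i++) {
		/* list->p_info[i].name / .version / .track_id describe each
		 * loaded profile.
		 */
	}

	return 0;
}
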
2125 static int check_invalid_pkt_type(uint32_t pkt_type)
2126 {
2127 	uint32_t l2, l3, l4, tnl, il2, il3, il4;
2128 
2129 	l2 = pkt_type & RTE_PTYPE_L2_MASK;
2130 	l3 = pkt_type & RTE_PTYPE_L3_MASK;
2131 	l4 = pkt_type & RTE_PTYPE_L4_MASK;
2132 	tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2133 	il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2134 	il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2135 	il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2136 
2137 	if (l2 &&
2138 	    l2 != RTE_PTYPE_L2_ETHER &&
2139 	    l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2140 	    l2 != RTE_PTYPE_L2_ETHER_ARP &&
2141 	    l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2142 	    l2 != RTE_PTYPE_L2_ETHER_NSH &&
2143 	    l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2144 	    l2 != RTE_PTYPE_L2_ETHER_QINQ &&
2145 	    l2 != RTE_PTYPE_L2_ETHER_PPPOE)
2146 		return -1;
2147 
2148 	if (l3 &&
2149 	    l3 != RTE_PTYPE_L3_IPV4 &&
2150 	    l3 != RTE_PTYPE_L3_IPV4_EXT &&
2151 	    l3 != RTE_PTYPE_L3_IPV6 &&
2152 	    l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2153 	    l3 != RTE_PTYPE_L3_IPV6_EXT &&
2154 	    l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2155 		return -1;
2156 
2157 	if (l4 &&
2158 	    l4 != RTE_PTYPE_L4_TCP &&
2159 	    l4 != RTE_PTYPE_L4_UDP &&
2160 	    l4 != RTE_PTYPE_L4_FRAG &&
2161 	    l4 != RTE_PTYPE_L4_SCTP &&
2162 	    l4 != RTE_PTYPE_L4_ICMP &&
2163 	    l4 != RTE_PTYPE_L4_NONFRAG)
2164 		return -1;
2165 
2166 	if (tnl &&
2167 	    tnl != RTE_PTYPE_TUNNEL_IP &&
2168 	    tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2169 	    tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2170 	    tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2171 	    tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2172 	    tnl != RTE_PTYPE_TUNNEL_GTPC &&
2173 	    tnl != RTE_PTYPE_TUNNEL_GTPU &&
2174 	    tnl != RTE_PTYPE_TUNNEL_L2TP &&
2175 	    tnl != RTE_PTYPE_TUNNEL_ESP)
2176 		return -1;
2177 
2178 	if (il2 &&
2179 	    il2 != RTE_PTYPE_INNER_L2_ETHER &&
2180 	    il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2181 	    il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2182 		return -1;
2183 
2184 	if (il3 &&
2185 	    il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2186 	    il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2187 	    il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2188 	    il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2189 	    il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2190 	    il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2191 		return -1;
2192 
2193 	if (il4 &&
2194 	    il4 != RTE_PTYPE_INNER_L4_TCP &&
2195 	    il4 != RTE_PTYPE_INNER_L4_UDP &&
2196 	    il4 != RTE_PTYPE_INNER_L4_FRAG &&
2197 	    il4 != RTE_PTYPE_INNER_L4_SCTP &&
2198 	    il4 != RTE_PTYPE_INNER_L4_ICMP &&
2199 	    il4 != RTE_PTYPE_INNER_L4_NONFRAG)
2200 		return -1;
2201 
2202 	return 0;
2203 }
2204 
2205 static int check_invalid_ptype_mapping(
2206 		struct rte_pmd_i40e_ptype_mapping *mapping_table,
2207 		uint16_t count)
2208 {
2209 	int i;
2210 
2211 	for (i = 0; i < count; i++) {
2212 		uint16_t ptype = mapping_table[i].hw_ptype;
2213 		uint32_t pkt_type = mapping_table[i].sw_ptype;
2214 
2215 		if (ptype >= I40E_MAX_PKT_TYPE)
2216 			return -1;
2217 
2218 		if (pkt_type == RTE_PTYPE_UNKNOWN)
2219 			continue;
2220 
2221 		if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2222 			continue;
2223 
2224 		if (check_invalid_pkt_type(pkt_type))
2225 			return -1;
2226 	}
2227 
2228 	return 0;
2229 }
2230 
2231 int
2232 rte_pmd_i40e_ptype_mapping_update(
2233 			uint16_t port,
2234 			struct rte_pmd_i40e_ptype_mapping *mapping_items,
2235 			uint16_t count,
2236 			uint8_t exclusive)
2237 {
2238 	struct rte_eth_dev *dev;
2239 	struct i40e_adapter *ad;
2240 	int i;
2241 
2242 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2243 
2244 	dev = &rte_eth_devices[port];
2245 
2246 	if (!is_i40e_supported(dev))
2247 		return -ENOTSUP;
2248 
2249 	if (count > I40E_MAX_PKT_TYPE)
2250 		return -EINVAL;
2251 
2252 	if (check_invalid_ptype_mapping(mapping_items, count))
2253 		return -EINVAL;
2254 
2255 	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2256 
2257 	if (exclusive) {
2258 		for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2259 			ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2260 	}
2261 
2262 	for (i = 0; i < count; i++)
2263 		ad->ptype_tbl[mapping_items[i].hw_ptype]
2264 			= mapping_items[i].sw_ptype;
2265 
2266 	return 0;
2267 }
2268 
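/*
 * Usage sketch (illustrative only): map one hardware packet type to a software
 * mbuf packet type without clearing the rest of the table.  The hardware
 * ptype value 38 is an arbitrary example; real values come from the loaded
 * DDP profile.
 */
static __rte_unused int
example_ptype_mapping_update(uint16_t port)
{
	struct rte_pmd_i40e_ptype_mapping item;

	item.hw_ptype = 38;
	item.sw_ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;

	/* exclusive == 0 keeps the existing entries of the table. */
	return rte_pmd_i40e_ptype_mapping_update(port, &item, 1, 0);
}
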
2269 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2270 {
2271 	struct rte_eth_dev *dev;
2272 
2273 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2274 
2275 	dev = &rte_eth_devices[port];
2276 
2277 	if (!is_i40e_supported(dev))
2278 		return -ENOTSUP;
2279 
2280 	i40e_set_default_ptype_table(dev);
2281 
2282 	return 0;
2283 }
2284 
2285 int rte_pmd_i40e_ptype_mapping_get(
2286 			uint16_t port,
2287 			struct rte_pmd_i40e_ptype_mapping *mapping_items,
2288 			uint16_t size,
2289 			uint16_t *count,
2290 			uint8_t valid_only)
2291 {
2292 	struct rte_eth_dev *dev;
2293 	struct i40e_adapter *ad;
2294 	int n = 0;
2295 	uint16_t i;
2296 
2297 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2298 
2299 	dev = &rte_eth_devices[port];
2300 
2301 	if (!is_i40e_supported(dev))
2302 		return -ENOTSUP;
2303 
2304 	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2305 
2306 	for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2307 		if (n >= size)
2308 			break;
2309 		if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2310 			continue;
2311 		mapping_items[n].hw_ptype = i;
2312 		mapping_items[n].sw_ptype = ad->ptype_tbl[i];
2313 		n++;
2314 	}
2315 
2316 	*count = n;
2317 	return 0;
2318 }
2319 
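/*
 * Usage sketch (illustrative only): retrieve only the table entries that
 * currently resolve to a known software packet type (valid_only == 1).
 */
static __rte_unused int
example_ptype_mapping_dump(uint16_t port)
{
	struct rte_pmd_i40e_ptype_mapping items[I40E_MAX_PKT_TYPE];
	uint16_t count = 0;
	int ret;

	ret = rte_pmd_i40e_ptype_mapping_get(port, items, I40E_MAX_PKT_TYPE,
					     &count, 1);
	if (ret)
		return ret;

	/* items[0..count-1] now hold the valid hw_ptype/sw_ptype pairs. */
	return 0;
}
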
2320 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2321 				       uint32_t target,
2322 				       uint8_t mask,
2323 				       uint32_t pkt_type)
2324 {
2325 	struct rte_eth_dev *dev;
2326 	struct i40e_adapter *ad;
2327 	uint16_t i;
2328 
2329 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2330 
2331 	dev = &rte_eth_devices[port];
2332 
2333 	if (!is_i40e_supported(dev))
2334 		return -ENOTSUP;
2335 
2336 	if (!mask && check_invalid_pkt_type(target))
2337 		return -EINVAL;
2338 
2339 	if (check_invalid_pkt_type(pkt_type))
2340 		return -EINVAL;
2341 
2342 	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2343 
2344 	for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2345 		if (mask) {
2346 			if ((target | ad->ptype_tbl[i]) == target &&
2347 			    (target & ad->ptype_tbl[i]))
2348 				ad->ptype_tbl[i] = pkt_type;
2349 		} else {
2350 			if (ad->ptype_tbl[i] == target)
2351 				ad->ptype_tbl[i] = pkt_type;
2352 		}
2353 	}
2354 
2355 	return 0;
2356 }
2357 
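/*
 * Usage sketch (illustrative only): rewrite every table entry whose software
 * ptype is exactly Ether/IPv4 so that it also reports UDP at L4.  With
 * mask == 0 the target is matched literally, as in the loop above.
 */
static __rte_unused int
example_ptype_mapping_replace(uint16_t port)
{
	uint32_t target = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;

	return rte_pmd_i40e_ptype_mapping_replace(port, target, 0,
						  target | RTE_PTYPE_L4_UDP);
}
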
2358 int
2359 rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
2360 			     struct rte_ether_addr *mac_addr)
2361 {
2362 	struct rte_eth_dev *dev;
2363 	struct i40e_pf_vf *vf;
2364 	struct i40e_vsi *vsi;
2365 	struct i40e_pf *pf;
2366 	struct i40e_mac_filter_info mac_filter;
2367 	int ret;
2368 
2369 	if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2370 		return -EINVAL;
2371 
2372 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2373 
2374 	dev = &rte_eth_devices[port];
2375 
2376 	if (!is_i40e_supported(dev))
2377 		return -ENOTSUP;
2378 
2379 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2380 
2381 	if (vf_id >= pf->vf_num || !pf->vfs)
2382 		return -EINVAL;
2383 
2384 	vf = &pf->vfs[vf_id];
2385 	vsi = vf->vsi;
2386 	if (!vsi) {
2387 		PMD_DRV_LOG(ERR, "Invalid VSI.");
2388 		return -EINVAL;
2389 	}
2390 
2391 	mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
2392 	rte_ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2393 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
2394 	if (ret != I40E_SUCCESS) {
2395 		PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2396 		return -1;
2397 	}
2398 
2399 	return 0;
2400 }
2401 
2402 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
2403 {
2404 	struct rte_eth_dev *dev;
2405 
2406 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2407 
2408 	dev = &rte_eth_devices[port];
2409 
2410 	if (!is_i40e_supported(dev) &&
2411 	    !is_i40evf_supported(dev))
2412 		return -ENOTSUP;
2413 
2414 	i40e_set_default_pctype_table(dev);
2415 
2416 	return 0;
2417 }
2418 
2419 int rte_pmd_i40e_flow_type_mapping_get(
2420 			uint16_t port,
2421 			struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2422 {
2423 	struct rte_eth_dev *dev;
2424 	struct i40e_adapter *ad;
2425 	uint16_t i;
2426 
2427 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2428 
2429 	dev = &rte_eth_devices[port];
2430 
2431 	if (!is_i40e_supported(dev) &&
2432 	    !is_i40evf_supported(dev))
2433 		return -ENOTSUP;
2434 
2435 	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2436 
2437 	for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2438 		mapping_items[i].flow_type = i;
2439 		mapping_items[i].pctype = ad->pctypes_tbl[i];
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 int
2446 rte_pmd_i40e_flow_type_mapping_update(
2447 			uint16_t port,
2448 			struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2449 			uint16_t count,
2450 			uint8_t exclusive)
2451 {
2452 	struct rte_eth_dev *dev;
2453 	struct i40e_adapter *ad;
2454 	int i;
2455 
2456 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2457 
2458 	dev = &rte_eth_devices[port];
2459 
2460 	if (!is_i40e_supported(dev) &&
2461 	    !is_i40evf_supported(dev))
2462 		return -ENOTSUP;
2463 
2464 	if (count > I40E_FLOW_TYPE_MAX)
2465 		return -EINVAL;
2466 
2467 	for (i = 0; i < count; i++)
2468 		if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2469 		    mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2470 		    (mapping_items[i].pctype &
2471 		    (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2472 			return -EINVAL;
2473 
2474 	ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2475 
2476 	if (exclusive) {
2477 		for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2478 			ad->pctypes_tbl[i] = 0ULL;
2479 		ad->flow_types_mask = 0ULL;
2480 	}
2481 
2482 	for (i = 0; i < count; i++) {
2483 		ad->pctypes_tbl[mapping_items[i].flow_type] =
2484 						mapping_items[i].pctype;
2485 		if (mapping_items[i].pctype)
2486 			ad->flow_types_mask |=
2487 					(1ULL << mapping_items[i].flow_type);
2488 		else
2489 			ad->flow_types_mask &=
2490 					~(1ULL << mapping_items[i].flow_type);
2491 	}
2492 
2493 	for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2494 		ad->pctypes_mask |= ad->pctypes_tbl[i];
2495 
2496 	return 0;
2497 }
2498 
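/*
 * Usage sketch (illustrative only): bind the NONFRAG_IPV4_UDP flow type to the
 * hardware UDP pctype only, leaving every other flow type untouched.  The
 * pctype index is taken here from the base-code enum i40e_filter_pctype; an
 * application would normally derive it from its DDP profile.
 */
static __rte_unused int
example_flow_type_mapping_update(uint16_t port)
{
	struct rte_pmd_i40e_flow_type_mapping item;

	item.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	item.pctype = 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;

	/* exclusive == 0: only this flow type's mapping is rewritten. */
	return rte_pmd_i40e_flow_type_mapping_update(port, &item, 1, 0);
}
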
2499 int
2500 rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
2501 			const struct rte_ether_addr *vf_mac)
2502 {
2503 	struct rte_eth_dev *dev;
2504 	struct rte_ether_addr *mac;
2505 	struct i40e_pf *pf;
2506 	int vf_id;
2507 	struct i40e_pf_vf *vf;
2508 	uint16_t vf_num;
2509 
2510 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2511 	dev = &rte_eth_devices[port];
2512 
2513 	if (!is_i40e_supported(dev))
2514 		return -ENOTSUP;
2515 
2516 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2517 	vf_num = pf->vf_num;
2518 
2519 	for (vf_id = 0; vf_id < vf_num; vf_id++) {
2520 		vf = &pf->vfs[vf_id];
2521 		mac = &vf->mac_addr;
2522 
2523 		if (rte_is_same_ether_addr(mac, vf_mac))
2524 			return vf_id;
2525 	}
2526 
2527 	return -EINVAL;
2528 }
2529 
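/*
 * Usage sketch (illustrative only): find which VF owns a given MAC address on
 * the PF port; a negative return value means no VF matched (or the port is
 * not an i40e device).
 */
static __rte_unused int
example_find_vf_by_mac(uint16_t port, const struct rte_ether_addr *mac)
{
	int vf_id = rte_pmd_i40e_query_vfid_by_mac(port, mac);

	if (vf_id < 0)
		return vf_id;

	/* vf_id is now a valid index into the PF's VF array. */
	return 0;
}
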
2530 static int
2531 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2532 			      struct i40e_pf *pf)
2533 {
2534 	uint16_t i;
2535 	struct i40e_vsi *vsi = pf->main_vsi;
2536 	uint16_t queue_offset, bsf, tc_index;
2537 	struct i40e_vsi_context ctxt;
2538 	struct i40e_aqc_vsi_properties_data *vsi_info;
2539 	struct i40e_queue_regions *region_info =
2540 				&pf->queue_region;
2541 	int32_t ret = -EINVAL;
2542 
2543 	if (!region_info->queue_region_number) {
2544 		PMD_INIT_LOG(ERR, "no queue region has been configured yet");
2545 		return ret;
2546 	}
2547 
2548 	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2549 
2550 	/* Update Queue Pairs Mapping for currently enabled UPs */
2551 	ctxt.seid = vsi->seid;
2552 	ctxt.pf_num = hw->pf_id;
2553 	ctxt.vf_num = 0;
2554 	ctxt.uplink_seid = vsi->uplink_seid;
2555 	ctxt.info = vsi->info;
2556 	vsi_info = &ctxt.info;
2557 
2558 	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2559 	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2560 
2561 	/* Configure the queue region and queue mapping parameters:
2562 	 * for each enabled queue region, allocate queues to that region.
2563 	 */
2564 
2565 	for (i = 0; i < region_info->queue_region_number; i++) {
2566 		tc_index = region_info->region[i].region_id;
2567 		bsf = rte_bsf32(region_info->region[i].queue_num);
2568 		queue_offset = region_info->region[i].queue_start_index;
2569 		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2570 			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2571 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2572 	}
2573 
2574 	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2575 	vsi_info->mapping_flags |=
2576 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2577 	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2578 	vsi_info->valid_sections |=
2579 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2580 
2581 	/* Update the VSI after updating the VSI queue-mapping information */
2582 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2583 	if (ret) {
2584 		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
2585 				hw->aq.asq_last_status);
2586 		return ret;
2587 	}
2588 	/* update the local VSI info with updated queue map */
2589 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2590 					sizeof(vsi->info.tc_mapping));
2591 	rte_memcpy(&vsi->info.queue_mapping,
2592 			&ctxt.info.queue_mapping,
2593 			sizeof(vsi->info.queue_mapping));
2594 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
2595 	vsi->info.valid_sections = 0;
2596 
2597 	return 0;
2598 }
2599 
2600 
2601 static int
2602 i40e_queue_region_set_region(struct i40e_pf *pf,
2603 				struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2604 {
2605 	uint16_t i;
2606 	struct i40e_vsi *main_vsi = pf->main_vsi;
2607 	struct i40e_queue_regions *info = &pf->queue_region;
2608 	int32_t ret = -EINVAL;
2609 
2610 	if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2611 				conf_ptr->queue_num <= 64)) {
2612 		PMD_DRV_LOG(ERR, "The region size must be one of the following values: 1, 2, 4, 8, 16, 32 or 64, as long as the "
2613 			"total number of queues does not exceed the VSI allocation");
2614 		return ret;
2615 	}
2616 
2617 	if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2618 		PMD_DRV_LOG(ERR, "the queue region max index is 7");
2619 		return ret;
2620 	}
2621 
2622 	if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2623 					> main_vsi->nb_used_qps) {
2624 		PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
2625 		return ret;
2626 	}
2627 
2628 	for (i = 0; i < info->queue_region_number; i++)
2629 		if (conf_ptr->region_id == info->region[i].region_id)
2630 			break;
2631 
2632 	if (i == info->queue_region_number &&
2633 				i <= I40E_REGION_MAX_INDEX) {
2634 		info->region[i].region_id = conf_ptr->region_id;
2635 		info->region[i].queue_num = conf_ptr->queue_num;
2636 		info->region[i].queue_start_index =
2637 			conf_ptr->queue_start_index;
2638 		info->queue_region_number++;
2639 	} else {
2640 		PMD_DRV_LOG(ERR, "the number of queue regions exceeds the maximum of 8, or this region id has already been configured");
2641 		return ret;
2642 	}
2643 
2644 	return 0;
2645 }
2646 
2647 static int
2648 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2649 			struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2650 {
2651 	int32_t ret = -EINVAL;
2652 	struct i40e_queue_regions *info = &pf->queue_region;
2653 	uint16_t i, j;
2654 	uint16_t region_index, flowtype_index;
2655 
2656 	/* For the pctype or hardware flowtype of packet,
2657 	 * the specific index for each type has been defined
2658 	 * in file i40e_type.h as enum i40e_filter_pctype.
2659 	 */
2660 
2661 	if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2662 		PMD_DRV_LOG(ERR, "the queue region max index is 7");
2663 		return ret;
2664 	}
2665 
2666 	if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2667 		PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
2668 		return ret;
2669 	}
2670 
2671 
2672 	for (i = 0; i < info->queue_region_number; i++)
2673 		if (rss_region_conf->region_id == info->region[i].region_id)
2674 			break;
2675 
2676 	if (i == info->queue_region_number) {
2677 		PMD_DRV_LOG(ERR, "this region id has not been configured yet");
2678 		ret = -EINVAL;
2679 		return ret;
2680 	}
2681 	region_index = i;
2682 
2683 	for (i = 0; i < info->queue_region_number; i++) {
2684 		for (j = 0; j < info->region[i].flowtype_num; j++) {
2685 			if (rss_region_conf->hw_flowtype ==
2686 				info->region[i].hw_flowtype[j]) {
2687 				PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2688 				return 0;
2689 			}
2690 		}
2691 	}
2692 
2693 	flowtype_index = info->region[region_index].flowtype_num;
2694 	info->region[region_index].hw_flowtype[flowtype_index] =
2695 					rss_region_conf->hw_flowtype;
2696 	info->region[region_index].flowtype_num++;
2697 
2698 	return 0;
2699 }
2700 
2701 static void
2702 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2703 				struct i40e_pf *pf)
2704 {
2705 	uint8_t hw_flowtype;
2706 	uint32_t pfqf_hregion;
2707 	uint16_t i, j, index;
2708 	struct i40e_queue_regions *info = &pf->queue_region;
2709 
2710 	/* For the pctype or hardware flowtype of packet,
2711 	 * the specific index for each type has been defined
2712 	 * in file i40e_type.h as enum i40e_filter_pctype.
2713 	 */
2714 
2715 	for (i = 0; i < info->queue_region_number; i++) {
2716 		for (j = 0; j < info->region[i].flowtype_num; j++) {
2717 			hw_flowtype = info->region[i].hw_flowtype[j];
2718 			index = hw_flowtype >> 3;
2719 			pfqf_hregion =
2720 				i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2721 
2722 			if ((hw_flowtype & 0x7) == 0) {
2723 				pfqf_hregion |= info->region[i].region_id <<
2724 					I40E_PFQF_HREGION_REGION_0_SHIFT;
2725 				pfqf_hregion |= 1 <<
2726 					I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2727 			} else if ((hw_flowtype & 0x7) == 1) {
2728 				pfqf_hregion |= info->region[i].region_id  <<
2729 					I40E_PFQF_HREGION_REGION_1_SHIFT;
2730 				pfqf_hregion |= 1 <<
2731 					I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2732 			} else if ((hw_flowtype & 0x7) == 2) {
2733 				pfqf_hregion |= info->region[i].region_id  <<
2734 					I40E_PFQF_HREGION_REGION_2_SHIFT;
2735 				pfqf_hregion |= 1 <<
2736 					I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2737 			} else if ((hw_flowtype & 0x7) == 3) {
2738 				pfqf_hregion |= info->region[i].region_id  <<
2739 					I40E_PFQF_HREGION_REGION_3_SHIFT;
2740 				pfqf_hregion |= 1 <<
2741 					I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2742 			} else if ((hw_flowtype & 0x7) == 4) {
2743 				pfqf_hregion |= info->region[i].region_id  <<
2744 					I40E_PFQF_HREGION_REGION_4_SHIFT;
2745 				pfqf_hregion |= 1 <<
2746 					I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2747 			} else if ((hw_flowtype & 0x7) == 5) {
2748 				pfqf_hregion |= info->region[i].region_id  <<
2749 					I40E_PFQF_HREGION_REGION_5_SHIFT;
2750 				pfqf_hregion |= 1 <<
2751 					I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2752 			} else if ((hw_flowtype & 0x7) == 6) {
2753 				pfqf_hregion |= info->region[i].region_id  <<
2754 					I40E_PFQF_HREGION_REGION_6_SHIFT;
2755 				pfqf_hregion |= 1 <<
2756 					I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
2757 			} else {
2758 				pfqf_hregion |= info->region[i].region_id  <<
2759 					I40E_PFQF_HREGION_REGION_7_SHIFT;
2760 				pfqf_hregion |= 1 <<
2761 					I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2762 			}
2763 
2764 			i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
2765 						pfqf_hregion);
2766 		}
2767 	}
2768 }
2769 
2770 static int
2771 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2772 		struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2773 {
2774 	struct i40e_queue_regions *info = &pf->queue_region;
2775 	int32_t ret = -EINVAL;
2776 	uint16_t i, j, region_index;
2777 
2778 	if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2779 		PMD_DRV_LOG(ERR, "the user priority max index is 7");
2780 		return ret;
2781 	}
2782 
2783 	if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2784 		PMD_DRV_LOG(ERR, "the region_id max index is 7");
2785 		return ret;
2786 	}
2787 
2788 	for (i = 0; i < info->queue_region_number; i++)
2789 		if (rss_region_conf->region_id == info->region[i].region_id)
2790 			break;
2791 
2792 	if (i == info->queue_region_number) {
2793 		PMD_DRV_LOG(ERR, "this region id has not been configured yet");
2794 		ret = -EINVAL;
2795 		return ret;
2796 	}
2797 
2798 	region_index = i;
2799 
2800 	for (i = 0; i < info->queue_region_number; i++) {
2801 		for (j = 0; j < info->region[i].user_priority_num; j++) {
2802 			if (info->region[i].user_priority[j] ==
2803 				rss_region_conf->user_priority) {
2804 				PMD_DRV_LOG(ERR, "that user priority has been set before");
2805 				return 0;
2806 			}
2807 		}
2808 	}
2809 
2810 	j = info->region[region_index].user_priority_num;
2811 	info->region[region_index].user_priority[j] =
2812 					rss_region_conf->user_priority;
2813 	info->region[region_index].user_priority_num++;
2814 
2815 	return 0;
2816 }
2817 
2818 static int
2819 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2820 				struct i40e_pf *pf)
2821 {
2822 	struct i40e_dcbx_config dcb_cfg_local;
2823 	struct i40e_dcbx_config *dcb_cfg;
2824 	struct i40e_queue_regions *info = &pf->queue_region;
2825 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2826 	int32_t ret = -EINVAL;
2827 	uint16_t i, j, prio_index, region_index;
2828 	uint8_t tc_map, tc_bw, bw_lf, dcb_flag = 0;
2829 
2830 	if (!info->queue_region_number) {
2831 		PMD_DRV_LOG(ERR, "No queue region has been configured yet");
2832 		return ret;
2833 	}
2834 
2835 	for (i = 0; i < info->queue_region_number; i++) {
2836 		if (info->region[i].user_priority_num) {
2837 			dcb_flag = 1;
2838 			break;
2839 		}
2840 	}
2841 
2842 	if (dcb_flag == 0)
2843 		return 0;
2844 
2845 	dcb_cfg = &dcb_cfg_local;
2846 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2847 
2848 	/* assume each tc has the same bw */
2849 	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2850 	for (i = 0; i < info->queue_region_number; i++)
2851 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2852 	/* to ensure the sum of tcbw is equal to 100 */
2853 	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
2854 	for (i = 0; i < bw_lf; i++)
2855 		dcb_cfg->etscfg.tcbwtable[i]++;
2856 
2857 	/* assume each tc has the same Transmission Selection Algorithm */
2858 	for (i = 0; i < info->queue_region_number; i++)
2859 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
2860 
2861 	for (i = 0; i < info->queue_region_number; i++) {
2862 		for (j = 0; j < info->region[i].user_priority_num; j++) {
2863 			prio_index = info->region[i].user_priority[j];
2864 			region_index = info->region[i].region_id;
2865 			dcb_cfg->etscfg.prioritytable[prio_index] =
2866 						region_index;
2867 		}
2868 	}
2869 
2870 	/* FW needs one App to configure HW */
2871 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2872 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2873 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2874 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
2875 
2876 	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2877 
2878 	dcb_cfg->pfc.willing = 0;
2879 	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2880 	dcb_cfg->pfc.pfcenable = tc_map;
2881 
2882 	/* Copy the new config to the current config */
2883 	*old_cfg = *dcb_cfg;
2884 	old_cfg->etsrec = old_cfg->etscfg;
2885 	ret = i40e_set_dcb_config(hw);
2886 
2887 	if (ret) {
2888 		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2889 			 i40e_stat_str(hw, ret),
2890 			 i40e_aq_str(hw, hw->aq.asq_last_status));
2891 		return ret;
2892 	}
2893 
2894 	return 0;
2895 }
2896 
2897 int
2898 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2899 	struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2900 {
2901 	int32_t ret = -EINVAL;
2902 	struct i40e_queue_regions *info = &pf->queue_region;
2903 	struct i40e_vsi *main_vsi = pf->main_vsi;
2904 
2905 	if (on) {
2906 		i40e_queue_region_pf_flowtype_conf(hw, pf);
2907 
2908 		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2909 		if (ret != I40E_SUCCESS) {
2910 			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2911 			return ret;
2912 		}
2913 
2914 		ret = i40e_queue_region_dcb_configure(hw, pf);
2915 		if (ret != I40E_SUCCESS) {
2916 			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2917 			return ret;
2918 		}
2919 
2920 		return 0;
2921 	}
2922 
2923 	if (info->queue_region_number) {
2924 		info->queue_region_number = 1;
2925 		info->region[0].queue_num = main_vsi->nb_used_qps;
2926 		info->region[0].queue_start_index = 0;
2927 
2928 		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2929 		if (ret != I40E_SUCCESS)
2930 			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2931 
2932 		ret = i40e_dcb_init_configure(dev, TRUE);
2933 		if (ret != I40E_SUCCESS) {
2934 			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2935 			pf->flags &= ~I40E_FLAG_DCB;
2936 		}
2937 
2938 		i40e_init_queue_region_conf(dev);
2939 	}
2940 	return 0;
2941 }
2942 
2943 static int
2944 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2945 {
2946 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2947 	uint64_t hena;
2948 
2949 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2950 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
2951 
2952 	if (!hena)
2953 		return -ENOTSUP;
2954 
2955 	return 0;
2956 }
2957 
2958 static int
2959 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2960 		struct i40e_queue_regions *regions_ptr)
2961 {
2962 	struct i40e_queue_regions *info = &pf->queue_region;
2963 
2964 	rte_memcpy(regions_ptr, info,
2965 			sizeof(struct i40e_queue_regions));
2966 
2967 	return 0;
2968 }
2969 
2970 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2971 		enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2972 {
2973 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2974 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2975 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2976 	int32_t ret;
2977 
2978 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2979 
2980 	if (!is_i40e_supported(dev))
2981 		return -ENOTSUP;
2982 
2983 	if (i40e_queue_region_pf_check_rss(pf))
2984 		return -ENOTSUP;
2985 
2986 	/* The queue region feature currently supports the PF only. It must
2987 	 * be called after dev_start and its configuration is cleared again
2988 	 * by dev_stop.
2989 	 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" is the commit step
2990 	 * for all the other operations: region, flow type and user priority
2991 	 * settings coming from the upper layer are first only recorded in
2992 	 * the driver's software state, and are programmed into the hardware
2993 	 * in one go when "FLUSH_ON" is issued, because the PMD has to write
2994 	 * the whole hardware configuration at once.
2995 	 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" discards all queue
2996 	 * region configuration made so far and restores the i40e driver's
2997 	 * start-up defaults.
2998 	 * A usage sketch follows this function.
2999 	 */
3000 
3001 	switch (op_type) {
3002 	case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
3003 		ret = i40e_queue_region_set_region(pf,
3004 				(struct rte_pmd_i40e_queue_region_conf *)arg);
3005 		break;
3006 	case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
3007 		ret = i40e_queue_region_set_flowtype(pf,
3008 				(struct rte_pmd_i40e_queue_region_conf *)arg);
3009 		break;
3010 	case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
3011 		ret = i40e_queue_region_set_user_priority(pf,
3012 				(struct rte_pmd_i40e_queue_region_conf *)arg);
3013 		break;
3014 	case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
3015 		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
3016 		break;
3017 	case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
3018 		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
3019 		break;
3020 	case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
3021 		ret = i40e_queue_region_get_all_info(pf,
3022 				(struct i40e_queue_regions *)arg);
3023 		break;
3024 	default:
3025 		PMD_DRV_LOG(WARNING, "op type (%d) not supported",
3026 			    op_type);
3027 		ret = -EINVAL;
3028 	}
3029 
3030 	I40E_WRITE_FLUSH(hw);
3031 
3032 	return ret;
3033 }
3034 
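/*
 * Usage sketch (illustrative only) of the flow described in the comment above:
 * create one queue region, attach a flow type and a user priority to it, then
 * commit everything to the hardware with FLUSH_ON.  Region id, queue range,
 * pctype and priority values are arbitrary examples.
 */
static __rte_unused int
example_queue_region_setup(uint16_t port)
{
	struct rte_pmd_i40e_queue_region_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.queue_start_index = 0;
	conf.queue_num = 4;
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret)
		return ret;

	conf.hw_flowtype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, &conf);
	if (ret)
		return ret;

	conf.user_priority = 0;
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET, &conf);
	if (ret)
		return ret;

	/* Nothing reaches the hardware until this flush is issued. */
	return rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, NULL);
}
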
3035 int rte_pmd_i40e_flow_add_del_packet_template(
3036 			uint16_t port,
3037 			const struct rte_pmd_i40e_pkt_template_conf *conf,
3038 			uint8_t add)
3039 {
3040 	struct rte_eth_dev *dev = &rte_eth_devices[port];
3041 	struct i40e_fdir_filter_conf filter_conf;
3042 
3043 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3044 
3045 	if (!is_i40e_supported(dev))
3046 		return -ENOTSUP;
3047 
3048 	memset(&filter_conf, 0, sizeof(filter_conf));
3049 	filter_conf.soft_id = conf->soft_id;
3050 	filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
3051 	filter_conf.input.flow.raw_flow.packet = conf->input.packet;
3052 	filter_conf.input.flow.raw_flow.length = conf->input.length;
3053 	filter_conf.input.flow_ext.pkt_template = true;
3054 
3055 	filter_conf.action.rx_queue = conf->action.rx_queue;
3056 	filter_conf.action.behavior =
3057 		(enum i40e_fdir_behavior)conf->action.behavior;
3058 	filter_conf.action.report_status =
3059 		(enum i40e_fdir_status)conf->action.report_status;
3060 	filter_conf.action.flex_off = conf->action.flex_off;
3061 
3062 	return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
3063 }
3064 
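/*
 * Usage sketch (illustrative only): steer packets that match a raw packet
 * template to Rx queue 1 through the flow director.  The template bytes and
 * the pctype are placeholders, and the remaining action fields are left at
 * their zeroed defaults.
 */
static __rte_unused int
example_add_packet_template(uint16_t port, uint8_t *tmpl, uint32_t tmpl_len)
{
	struct rte_pmd_i40e_pkt_template_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.soft_id = 1;
	conf.input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	conf.input.packet = tmpl;
	conf.input.length = tmpl_len;
	conf.action.rx_queue = 1;

	return rte_pmd_i40e_flow_add_del_packet_template(port, &conf, 1);
}
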
3065 int
3066 rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
3067 		       struct rte_pmd_i40e_inset *inset,
3068 		       enum rte_pmd_i40e_inset_type inset_type)
3069 {
3070 	struct rte_eth_dev *dev;
3071 	struct i40e_hw *hw;
3072 	uint64_t inset_reg;
3073 	uint32_t mask_reg[2];
3074 	int i;
3075 
3076 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3077 
3078 	dev = &rte_eth_devices[port];
3079 
3080 	if (!is_i40e_supported(dev))
3081 		return -ENOTSUP;
3082 
3083 	if (pctype > 63)
3084 		return -EINVAL;
3085 
3086 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3087 	memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
3088 
3089 	switch (inset_type) {
3090 	case INSET_HASH:
3091 		/* Get input set */
3092 		inset_reg =
3093 			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
3094 		inset_reg <<= I40E_32_BIT_WIDTH;
3095 		inset_reg |=
3096 			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
3097 		/* Get field mask */
3098 		mask_reg[0] =
3099 			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
3100 		mask_reg[1] =
3101 			i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
3102 		break;
3103 	case INSET_FDIR:
3104 		inset_reg =
3105 			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
3106 		inset_reg <<= I40E_32_BIT_WIDTH;
3107 		inset_reg |=
3108 			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
3109 		mask_reg[0] =
3110 			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
3111 		mask_reg[1] =
3112 			i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
3113 		break;
3114 	case INSET_FDIR_FLX:
3115 		inset_reg =
3116 			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
3117 		mask_reg[0] =
3118 			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
3119 		mask_reg[1] =
3120 			i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
3121 		break;
3122 	default:
3123 		PMD_DRV_LOG(ERR, "Unsupported input set type.");
3124 		return -EINVAL;
3125 	}
3126 
3127 	inset->inset = inset_reg;
3128 
3129 	for (i = 0; i < 2; i++) {
3130 		inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
3131 		inset->mask[i].mask = mask_reg[i] & 0xFFFF;
3132 	}
3133 
3134 	return 0;
3135 }
3136 
3137 int
3138 rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
3139 		       struct rte_pmd_i40e_inset *inset,
3140 		       enum rte_pmd_i40e_inset_type inset_type)
3141 {
3142 	struct rte_eth_dev *dev;
3143 	struct i40e_hw *hw;
3144 	struct i40e_pf *pf;
3145 	uint64_t inset_reg;
3146 	uint32_t mask_reg[2];
3147 	int i;
3148 
3149 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3150 
3151 	dev = &rte_eth_devices[port];
3152 
3153 	if (!is_i40e_supported(dev))
3154 		return -ENOTSUP;
3155 
3156 	if (pctype > 63)
3157 		return -EINVAL;
3158 
3159 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3160 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3161 
3162 	if (pf->support_multi_driver) {
3163 		PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
3164 		return -ENOTSUP;
3165 	}
3166 
3167 	inset_reg = inset->inset;
3168 	for (i = 0; i < 2; i++)
3169 		mask_reg[i] = (inset->mask[i].field_idx << 16) |
3170 			inset->mask[i].mask;
3171 
3172 	switch (inset_type) {
3173 	case INSET_HASH:
3174 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
3175 					    (uint32_t)(inset_reg & UINT32_MAX));
3176 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
3177 					    (uint32_t)((inset_reg >>
3178 					     I40E_32_BIT_WIDTH) & UINT32_MAX));
3179 		for (i = 0; i < 2; i++)
3180 			i40e_check_write_global_reg(hw,
3181 						  I40E_GLQF_HASH_MSK(i, pctype),
3182 						  mask_reg[i]);
3183 		break;
3184 	case INSET_FDIR:
3185 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
3186 				     (uint32_t)(inset_reg & UINT32_MAX));
3187 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
3188 				     (uint32_t)((inset_reg >>
3189 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
3190 		for (i = 0; i < 2; i++)
3191 			i40e_check_write_global_reg(hw,
3192 						    I40E_GLQF_FD_MSK(i, pctype),
3193 						    mask_reg[i]);
3194 		break;
3195 	case INSET_FDIR_FLX:
3196 		i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
3197 				     (uint32_t)(inset_reg & UINT32_MAX));
3198 		for (i = 0; i < 2; i++)
3199 			i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
3200 					     mask_reg[i]);
3201 		break;
3202 	default:
3203 		PMD_DRV_LOG(ERR, "Unsupported input set type.");
3204 		return -EINVAL;
3205 	}
3206 
3207 	I40E_WRITE_FLUSH(hw);
3208 	return 0;
3209 }
3210 
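/*
 * Usage sketch (illustrative only): read-modify-write skeleton for the flow
 * director input set of one pctype.  Pctype 31 (non-fragmented IPv4/UDP in
 * the base code) is an arbitrary example value.
 */
static __rte_unused int
example_inset_round_trip(uint16_t port)
{
	struct rte_pmd_i40e_inset inset;
	int ret;

	ret = rte_pmd_i40e_inset_get(port, 31, &inset, INSET_FDIR);
	if (ret)
		return ret;

	/* An application would adjust inset.inset and inset.mask[] here
	 * before writing the configuration back.
	 */
	return rte_pmd_i40e_inset_set(port, 31, &inset, INSET_FDIR);
}
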
3211 int
3212 rte_pmd_i40e_get_fdir_info(uint16_t port, struct rte_eth_fdir_info *fdir_info)
3213 {
3214 	struct rte_eth_dev *dev;
3215 
3216 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3217 
3218 	dev = &rte_eth_devices[port];
3219 	if (!is_i40e_supported(dev))
3220 		return -ENOTSUP;
3221 
3222 	i40e_fdir_info_get(dev, fdir_info);
3223 
3224 	return 0;
3225 }
3226 
3227 int
3228 rte_pmd_i40e_get_fdir_stats(uint16_t port, struct rte_eth_fdir_stats *fdir_stat)
3229 {
3230 	struct rte_eth_dev *dev;
3231 
3232 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3233 
3234 	dev = &rte_eth_devices[port];
3235 	if (!is_i40e_supported(dev))
3236 		return -ENOTSUP;
3237 
3238 	i40e_fdir_stats_get(dev, fdir_stat);
3239 
3240 	return 0;
3241 }
3242 
3243 int
3244 rte_pmd_i40e_set_gre_key_len(uint16_t port, uint8_t len)
3245 {
3246 	struct rte_eth_dev *dev;
3247 	struct i40e_pf *pf;
3248 	struct i40e_hw *hw;
3249 
3250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
3251 
3252 	dev = &rte_eth_devices[port];
3253 	if (!is_i40e_supported(dev))
3254 		return -ENOTSUP;
3255 
3256 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3257 	hw = I40E_PF_TO_HW(pf);
3258 
3259 	return i40e_dev_set_gre_key_len(hw, len);
3260 }
3261 
3262 int
3263 rte_pmd_i40e_set_switch_dev(uint16_t port_id, struct rte_eth_dev *switch_dev)
3264 {
3265 	struct rte_eth_dev *i40e_dev;
3266 	struct i40e_hw *hw;
3267 
3268 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3269 
3270 	i40e_dev = &rte_eth_devices[port_id];
3271 	if (!is_i40e_supported(i40e_dev))
3272 		return -ENOTSUP;
3273 
3274 	hw = I40E_DEV_PRIVATE_TO_HW(i40e_dev->data->dev_private);
3275 	if (!hw)
3276 		return -1;
3277 
3278 	hw->switch_dev = switch_dev;
3279 
3280 	return 0;
3281 }
3282