1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <rte_ethdev_driver.h>
6 
7 #include "base/ixgbe_api.h"
8 #include "base/ixgbe_x550.h"
9 #include "ixgbe_ethdev.h"
10 #include "rte_pmd_ixgbe.h"
11 
12 int
13 rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
14 			      struct rte_ether_addr *mac_addr)
15 {
16 	struct ixgbe_hw *hw;
17 	struct ixgbe_vf_info *vfinfo;
18 	int rar_entry;
19 	uint8_t *new_mac = (uint8_t *)(mac_addr);
20 	struct rte_eth_dev *dev;
21 	struct rte_pci_device *pci_dev;
22 
23 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
24 
25 	dev = &rte_eth_devices[port];
26 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
27 
28 	if (!is_ixgbe_supported(dev))
29 		return -ENOTSUP;
30 
31 	if (vf >= pci_dev->max_vfs)
32 		return -EINVAL;
33 
34 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
35 	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
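	/* Each VF's MAC is mirrored into a dedicated receive address
	 * register; RAR entries are handed out from the top of the
	 * table, so VF 0 uses the last entry.
	 */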
36 	rar_entry = hw->mac.num_rar_entries - (vf + 1);
37 
38 	if (rte_is_valid_assigned_ether_addr(
39 			(struct rte_ether_addr *)new_mac)) {
40 		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
41 			   RTE_ETHER_ADDR_LEN);
42 		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
43 					   IXGBE_RAH_AV);
44 	}
45 	return -EINVAL;
46 }
47 
48 int
49 rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
50 {
51 	struct ixgbe_hw *hw;
52 	struct ixgbe_vf_info *vfinfo;
53 	struct rte_eth_dev *dev;
54 	struct rte_pci_device *pci_dev;
55 	uint32_t ctrl;
56 
57 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
58 
59 	dev = &rte_eth_devices[port];
60 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
61 
62 	if (!is_ixgbe_supported(dev))
63 		return -ENOTSUP;
64 
65 	if (vf >= pci_dev->max_vfs)
66 		return -EINVAL;
67 
68 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
69 	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
70 
71 	ctrl = IXGBE_PF_CONTROL_MSG;
72 	if (vfinfo[vf].clear_to_send)
73 		ctrl |= IXGBE_VT_MSGTYPE_CTS;
74 
75 	ixgbe_write_mbx(hw, &ctrl, 1, vf);
76 
77 	return 0;
78 }
79 
80 int
81 rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
82 {
83 	struct ixgbe_hw *hw;
84 	struct ixgbe_mac_info *mac;
85 	struct rte_eth_dev *dev;
86 	struct rte_pci_device *pci_dev;
87 
88 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
89 
90 	dev = &rte_eth_devices[port];
91 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
92 
93 	if (!is_ixgbe_supported(dev))
94 		return -ENOTSUP;
95 
96 	if (vf >= pci_dev->max_vfs)
97 		return -EINVAL;
98 
99 	if (on > 1)
100 		return -EINVAL;
101 
102 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
103 	mac = &hw->mac;
104 
105 	mac->ops.set_vlan_anti_spoofing(hw, on, vf);
106 
107 	return 0;
108 }
109 
110 int
111 rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
112 {
113 	struct ixgbe_hw *hw;
114 	struct ixgbe_mac_info *mac;
115 	struct rte_eth_dev *dev;
116 	struct rte_pci_device *pci_dev;
117 
118 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
119 
120 	dev = &rte_eth_devices[port];
121 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
122 
123 	if (!is_ixgbe_supported(dev))
124 		return -ENOTSUP;
125 
126 	if (vf >= pci_dev->max_vfs)
127 		return -EINVAL;
128 
129 	if (on > 1)
130 		return -EINVAL;
131 
132 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
133 	mac = &hw->mac;
134 	mac->ops.set_mac_anti_spoofing(hw, on, vf);
135 
136 	return 0;
137 }
138 
139 int
140 rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
141 {
142 	struct ixgbe_hw *hw;
143 	uint32_t ctrl;
144 	struct rte_eth_dev *dev;
145 	struct rte_pci_device *pci_dev;
146 
147 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
148 
149 	dev = &rte_eth_devices[port];
150 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
151 
152 	if (!is_ixgbe_supported(dev))
153 		return -ENOTSUP;
154 
155 	if (vf >= pci_dev->max_vfs)
156 		return -EINVAL;
157 
158 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
159 		return -EINVAL;
160 
161 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
162 	ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
163 	if (vlan_id) {
164 		ctrl = vlan_id;
165 		ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
166 	} else {
167 		ctrl = 0;
168 	}
169 
170 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
171 
172 	return 0;
173 }
174 
175 int
176 rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
177 {
178 	struct ixgbe_hw *hw;
179 	uint32_t ctrl;
180 	struct rte_eth_dev *dev;
181 
182 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
183 
184 	dev = &rte_eth_devices[port];
185 
186 	if (!is_ixgbe_supported(dev))
187 		return -ENOTSUP;
188 
189 	if (on > 1)
190 		return -EINVAL;
191 
192 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
193 	ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
194 	/* enable or disable VMDQ loopback */
195 	if (on)
196 		ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
197 	else
198 		ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
199 
200 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
201 
202 	return 0;
203 }
204 
205 int
206 rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
207 {
208 	struct ixgbe_hw *hw;
209 	uint32_t reg_value;
210 	int i;
211 	int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
212 	struct rte_eth_dev *dev;
213 
214 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
215 
216 	dev = &rte_eth_devices[port];
217 
218 	if (!is_ixgbe_supported(dev))
219 		return -ENOTSUP;
220 
221 	if (on > 1)
222 		return -EINVAL;
223 
224 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
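	/* Walk every queue index the QDE register can address and
	 * program its drop-enable bit: each write selects a queue
	 * through the index field and applies the requested setting.
	 */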
225 	for (i = 0; i <= num_queues; i++) {
226 		reg_value = IXGBE_QDE_WRITE |
227 				(i << IXGBE_QDE_IDX_SHIFT) |
228 				(on & IXGBE_QDE_ENABLE);
229 		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
230 	}
231 
232 	return 0;
233 }
234 
235 int
236 rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
237 {
238 	struct ixgbe_hw *hw;
239 	uint32_t reg_value;
240 	struct rte_eth_dev *dev;
241 	struct rte_pci_device *pci_dev;
242 
243 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
244 
245 	dev = &rte_eth_devices[port];
246 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
247 
248 	if (!is_ixgbe_supported(dev))
249 		return -ENOTSUP;
250 
251 	/* only support VFs 0 to 63 */
252 	if ((vf >= pci_dev->max_vfs) || (vf > 63))
253 		return -EINVAL;
254 
255 	if (on > 1)
256 		return -EINVAL;
257 
258 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
259 	reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
260 	if (on)
261 		reg_value |= IXGBE_SRRCTL_DROP_EN;
262 	else
263 		reg_value &= ~IXGBE_SRRCTL_DROP_EN;
264 
265 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
266 
267 	return 0;
268 }
269 
270 int
271 rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
272 {
273 	struct rte_eth_dev *dev;
274 	struct rte_pci_device *pci_dev;
275 	struct ixgbe_hw *hw;
276 	uint16_t queues_per_pool;
277 	uint32_t q;
278 
279 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
280 
281 	dev = &rte_eth_devices[port];
282 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
283 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
284 
285 	if (!is_ixgbe_supported(dev))
286 		return -ENOTSUP;
287 
288 	if (vf >= pci_dev->max_vfs)
289 		return -EINVAL;
290 
291 	if (on > 1)
292 		return -EINVAL;
293 
294 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
295 
296 	/* The PF has 128 queue pairs and in SRIOV configuration
297 	 * those queues will be assigned to VFs, so RXDCTL
298 	 * registers will be dealing with queues which will be
299 	 * assigned to VFs.
300 	 * Let's say we have SRIOV configured with 31 VFs, then the
301 	 * first 124 queues 0-123 will be allocated to VFs and only
302 	 * the last 4 queues 124-127 will be assigned to the PF.
303 	 */
304 	if (hw->mac.type == ixgbe_mac_82598EB)
305 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
306 				  ETH_16_POOLS;
307 	else
308 		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
309 				  ETH_64_POOLS;
310 
311 	for (q = 0; q < queues_per_pool; q++)
312 		(*dev->dev_ops->vlan_strip_queue_set)(dev,
313 				q + vf * queues_per_pool, on);
314 	return 0;
315 }
316 
317 int
318 rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
319 			    uint16_t rx_mask, uint8_t on)
320 {
321 	int val = 0;
322 	struct rte_eth_dev *dev;
323 	struct rte_pci_device *pci_dev;
324 	struct ixgbe_hw *hw;
325 	uint32_t vmolr;
326 
327 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
328 
329 	dev = &rte_eth_devices[port];
330 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
331 
332 	if (!is_ixgbe_supported(dev))
333 		return -ENOTSUP;
334 
335 	if (vf >= pci_dev->max_vfs)
336 		return -EINVAL;
337 
338 	if (on > 1)
339 		return -EINVAL;
340 
341 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
342 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
343 
344 	if (hw->mac.type == ixgbe_mac_82598EB) {
345 		PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
346 			     " on 82599 hardware and newer");
347 		return -ENOTSUP;
348 	}
349 	if (ixgbe_vt_check(hw) < 0)
350 		return -ENOTSUP;
351 
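	/* Translate the requested rx mode mask into VMOLR bits and
	 * set or clear them in this VF's VMOLR register.
	 */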
352 	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
353 
354 	if (on)
355 		vmolr |= val;
356 	else
357 		vmolr &= ~val;
358 
359 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
360 
361 	return 0;
362 }
363 
364 int
365 rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
366 {
367 	struct rte_eth_dev *dev;
368 	struct rte_pci_device *pci_dev;
369 	uint32_t reg, addr;
370 	uint32_t val;
371 	const uint8_t bit1 = 0x1;
372 	struct ixgbe_hw *hw;
373 
374 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
375 
376 	dev = &rte_eth_devices[port];
377 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
378 
379 	if (!is_ixgbe_supported(dev))
380 		return -ENOTSUP;
381 
382 	if (vf >= pci_dev->max_vfs)
383 		return -EINVAL;
384 
385 	if (on > 1)
386 		return -EINVAL;
387 
388 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
389 
390 	if (ixgbe_vt_check(hw) < 0)
391 		return -ENOTSUP;
392 
393 	/* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
394 	if (vf >= 32) {
395 		addr = IXGBE_VFRE(1);
396 		val = bit1 << (vf - 32);
397 	} else {
398 		addr = IXGBE_VFRE(0);
399 		val = bit1 << vf;
400 	}
401 
402 	reg = IXGBE_READ_REG(hw, addr);
403 
404 	if (on)
405 		reg |= val;
406 	else
407 		reg &= ~val;
408 
409 	IXGBE_WRITE_REG(hw, addr, reg);
410 
411 	return 0;
412 }
413 
414 int
415 rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
416 {
417 	struct rte_eth_dev *dev;
418 	struct rte_pci_device *pci_dev;
419 	uint32_t reg, addr;
420 	uint32_t val;
421 	const uint8_t bit1 = 0x1;
422 
423 	struct ixgbe_hw *hw;
424 
425 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
426 
427 	dev = &rte_eth_devices[port];
428 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
429 
430 	if (!is_ixgbe_supported(dev))
431 		return -ENOTSUP;
432 
433 	if (vf >= pci_dev->max_vfs)
434 		return -EINVAL;
435 
436 	if (on > 1)
437 		return -EINVAL;
438 
439 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
440 	if (ixgbe_vt_check(hw) < 0)
441 		return -ENOTSUP;
442 
443 	/* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
444 	if (vf >= 32) {
445 		addr = IXGBE_VFTE(1);
446 		val = bit1 << (vf - 32);
447 	} else {
448 		addr = IXGBE_VFTE(0);
449 		val = bit1 << vf;
450 	}
451 
452 	reg = IXGBE_READ_REG(hw, addr);
453 
454 	if (on)
455 		reg |= val;
456 	else
457 		reg &= ~val;
458 
459 	IXGBE_WRITE_REG(hw, addr, reg);
460 
461 	return 0;
462 }
463 
464 int
465 rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
466 				 uint64_t vf_mask, uint8_t vlan_on)
467 {
468 	struct rte_eth_dev *dev;
469 	int ret = 0;
470 	uint16_t vf_idx;
471 	struct ixgbe_hw *hw;
472 
473 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
474 
475 	dev = &rte_eth_devices[port];
476 
477 	if (!is_ixgbe_supported(dev))
478 		return -ENOTSUP;
479 
480 	if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)
481 		return -EINVAL;
482 
483 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
484 	if (ixgbe_vt_check(hw) < 0)
485 		return -ENOTSUP;
486 
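	/* Apply the VLAN filter to every VF selected in vf_mask,
	 * stopping on the first error reported by set_vfta.
	 */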
487 	for (vf_idx = 0; vf_idx < 64; vf_idx++) {
488 		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
489 			ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
490 						   vlan_on, false);
491 			if (ret < 0)
492 				return ret;
493 		}
494 	}
495 
496 	return ret;
497 }
498 
499 int
500 rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
501 				uint16_t tx_rate, uint64_t q_msk)
502 {
503 	struct rte_eth_dev *dev;
504 
505 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
506 
507 	dev = &rte_eth_devices[port];
508 
509 	if (!is_ixgbe_supported(dev))
510 		return -ENOTSUP;
511 
512 	return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
513 }
514 
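/* Save the requested MACsec configuration (offload on, encryption and
 * replay protection as given) in the device private data and program
 * the MACsec registers accordingly.
 */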
515 int
516 rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
517 {
518 	struct rte_eth_dev *dev;
519 	struct ixgbe_macsec_setting macsec_setting;
520 
521 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
522 
523 	dev = &rte_eth_devices[port];
524 
525 	if (!is_ixgbe_supported(dev))
526 		return -ENOTSUP;
527 
528 	macsec_setting.offload_en = 1;
529 	macsec_setting.encrypt_en = en;
530 	macsec_setting.replayprotect_en = rp;
531 
532 	ixgbe_dev_macsec_setting_save(dev, &macsec_setting);
533 
534 	ixgbe_dev_macsec_register_enable(dev, &macsec_setting);
535 
536 	return 0;
537 }
538 
539 int
540 rte_pmd_ixgbe_macsec_disable(uint16_t port)
541 {
542 	struct rte_eth_dev *dev;
543 
544 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
545 
546 	dev = &rte_eth_devices[port];
547 
548 	if (!is_ixgbe_supported(dev))
549 		return -ENOTSUP;
550 
551 	ixgbe_dev_macsec_setting_reset(dev);
552 
553 	ixgbe_dev_macsec_register_disable(dev);
554 
555 	return 0;
556 }
557 
558 int
559 rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
560 {
561 	struct ixgbe_hw *hw;
562 	struct rte_eth_dev *dev;
563 	uint32_t ctrl;
564 
565 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
566 
567 	dev = &rte_eth_devices[port];
568 
569 	if (!is_ixgbe_supported(dev))
570 		return -ENOTSUP;
571 
572 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
573 
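	/* The Tx SCI is derived from the port MAC address: the low four
	 * bytes go into LSECTXSCL and the remaining two into LSECTXSCH.
	 */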
574 	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
575 	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
576 
577 	ctrl = mac[4] | (mac[5] << 8);
578 	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
579 
580 	return 0;
581 }
582 
583 int
584 rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
585 {
586 	struct ixgbe_hw *hw;
587 	struct rte_eth_dev *dev;
588 	uint32_t ctrl;
589 
590 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
591 
592 	dev = &rte_eth_devices[port];
593 
594 	if (!is_ixgbe_supported(dev))
595 		return -ENOTSUP;
596 
597 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
598 
599 	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
600 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
601 
602 	pi = rte_cpu_to_be_16(pi);
603 	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
604 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
605 
606 	return 0;
607 }
608 
609 int
610 rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
611 				 uint32_t pn, uint8_t *key)
612 {
613 	struct ixgbe_hw *hw;
614 	struct rte_eth_dev *dev;
615 	uint32_t ctrl, i;
616 
617 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
618 
619 	dev = &rte_eth_devices[port];
620 
621 	if (!is_ixgbe_supported(dev))
622 		return -ENOTSUP;
623 
624 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625 
626 	if (idx != 0 && idx != 1)
627 		return -EINVAL;
628 
629 	if (an >= 4)
630 		return -EINVAL;
631 
632 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
633 
634 	/* Set the PN and key */
635 	pn = rte_cpu_to_be_32(pn);
636 	if (idx == 0) {
637 		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
638 
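		/* Program the 128-bit key as four 32-bit words,
		 * least significant byte first.
		 */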
639 		for (i = 0; i < 4; i++) {
640 			ctrl = (key[i * 4 + 0] <<  0) |
641 			       (key[i * 4 + 1] <<  8) |
642 			       (key[i * 4 + 2] << 16) |
643 			       (key[i * 4 + 3] << 24);
644 			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
645 		}
646 	} else {
647 		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
648 
649 		for (i = 0; i < 4; i++) {
650 			ctrl = (key[i * 4 + 0] <<  0) |
651 			       (key[i * 4 + 1] <<  8) |
652 			       (key[i * 4 + 2] << 16) |
653 			       (key[i * 4 + 3] << 24);
654 			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
655 		}
656 	}
657 
658 	/* Set AN and select the SA */
659 	ctrl = (an << idx * 2) | (idx << 4);
660 	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
661 
662 	return 0;
663 }
664 
665 int
666 rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
667 				 uint32_t pn, uint8_t *key)
668 {
669 	struct ixgbe_hw *hw;
670 	struct rte_eth_dev *dev;
671 	uint32_t ctrl, i;
672 
673 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
674 
675 	dev = &rte_eth_devices[port];
676 
677 	if (!is_ixgbe_supported(dev))
678 		return -ENOTSUP;
679 
680 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
681 
682 	if (idx != 0 && idx != 1)
683 		return -EINVAL;
684 
685 	if (an >= 4)
686 		return -EINVAL;
687 
688 	/* Set the PN */
689 	pn = rte_cpu_to_be_32(pn);
690 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
691 
692 	/* Set the key */
693 	for (i = 0; i < 4; i++) {
694 		ctrl = (key[i * 4 + 0] <<  0) |
695 		       (key[i * 4 + 1] <<  8) |
696 		       (key[i * 4 + 2] << 16) |
697 		       (key[i * 4 + 3] << 24);
698 		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
699 	}
700 
701 	/* Set the AN and validate the SA */
702 	ctrl = an | (1 << 2);
703 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
704 
705 	return 0;
706 }
707 
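/* Distribute Tx bandwidth among the enabled traffic classes: the number
 * of TCs is taken from the current DCB/VMDq-DCB Tx configuration and the
 * per-TC weights passed in bw_weight must add up to 100.
 */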
708 int
709 rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
710 			      uint8_t tc_num,
711 			      uint8_t *bw_weight)
712 {
713 	struct rte_eth_dev *dev;
714 	struct ixgbe_dcb_config *dcb_config;
715 	struct ixgbe_dcb_tc_config *tc;
716 	struct rte_eth_conf *eth_conf;
717 	struct ixgbe_bw_conf *bw_conf;
718 	uint8_t i;
719 	uint8_t nb_tcs;
720 	uint16_t sum;
721 
722 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
723 
724 	dev = &rte_eth_devices[port];
725 
726 	if (!is_ixgbe_supported(dev))
727 		return -ENOTSUP;
728 
729 	if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
730 		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
731 			    IXGBE_DCB_MAX_TRAFFIC_CLASS);
732 		return -EINVAL;
733 	}
734 
735 	dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
736 	bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
737 	eth_conf = &dev->data->dev_conf;
738 
739 	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
740 		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
741 	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
742 		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
743 		    ETH_32_POOLS)
744 			nb_tcs = ETH_4_TCS;
745 		else
746 			nb_tcs = ETH_8_TCS;
747 	} else {
748 		nb_tcs = 1;
749 	}
750 
751 	if (nb_tcs != tc_num) {
752 		PMD_DRV_LOG(ERR,
753 			    "Weight should be set for all %d enabled TCs.",
754 			    nb_tcs);
755 		return -EINVAL;
756 	}
757 
758 	sum = 0;
759 	for (i = 0; i < nb_tcs; i++)
760 		sum += bw_weight[i];
761 	if (sum != 100) {
762 		PMD_DRV_LOG(ERR,
763 			    "The sum of the TC weights should be 100.");
764 		return -EINVAL;
765 	}
766 
767 	for (i = 0; i < nb_tcs; i++) {
768 		tc = &dcb_config->tc_config[i];
769 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
770 	}
771 	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
772 		tc = &dcb_config->tc_config[i];
773 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
774 	}
775 
776 	bw_conf->tc_num = nb_tcs;
777 
778 	return 0;
779 }
780 
781 int
782 rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
783 {
784 	struct ixgbe_hw *hw;
785 	struct rte_eth_dev *dev;
786 	uint32_t fctrl;
787 
788 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
789 	dev = &rte_eth_devices[port];
790 	if (!is_ixgbe_supported(dev))
791 		return -ENOTSUP;
792 
793 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
794 	if (!hw)
795 		return -ENOTSUP;
796 
797 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
798 
799 	/* If 'enable' is set, set the SBP bit, otherwise clear it */
800 	if (enable)
801 		fctrl |= IXGBE_FCTRL_SBP;
802 	else
803 		fctrl &= ~(IXGBE_FCTRL_SBP);
804 
805 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
806 	return 0;
807 }
808 
809 #ifdef RTE_LIBRTE_IXGBE_BYPASS
810 int
811 rte_pmd_ixgbe_bypass_init(uint16_t port_id)
812 {
813 	struct rte_eth_dev *dev;
814 
815 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
816 
817 	dev = &rte_eth_devices[port_id];
818 	if (!is_ixgbe_supported(dev))
819 		return -ENOTSUP;
820 
821 	ixgbe_bypass_init(dev);
822 	return 0;
823 }
824 
825 int
826 rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
827 {
828 	struct rte_eth_dev *dev;
829 
830 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
831 
832 	dev = &rte_eth_devices[port_id];
833 	if (!is_ixgbe_supported(dev))
834 		return -ENOTSUP;
835 
836 	return ixgbe_bypass_state_show(dev, state);
837 }
838 
839 int
840 rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
841 {
842 	struct rte_eth_dev *dev;
843 
844 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
845 
846 	dev = &rte_eth_devices[port_id];
847 	if (!is_ixgbe_supported(dev))
848 		return -ENOTSUP;
849 
850 	return ixgbe_bypass_state_store(dev, new_state);
851 }
852 
853 int
854 rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
855 				uint32_t event,
856 				uint32_t *state)
857 {
858 	struct rte_eth_dev *dev;
859 
860 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
861 
862 	dev = &rte_eth_devices[port_id];
863 	if (!is_ixgbe_supported(dev))
864 		return -ENOTSUP;
865 
866 	return ixgbe_bypass_event_show(dev, event, state);
867 }
868 
869 int
870 rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
871 				 uint32_t event,
872 				 uint32_t state)
873 {
874 	struct rte_eth_dev *dev;
875 
876 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
877 
878 	dev = &rte_eth_devices[port_id];
879 	if (!is_ixgbe_supported(dev))
880 		return -ENOTSUP;
881 
882 	return ixgbe_bypass_event_store(dev, event, state);
883 }
884 
885 int
886 rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
887 {
888 	struct rte_eth_dev *dev;
889 
890 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
891 
892 	dev = &rte_eth_devices[port_id];
893 	if (!is_ixgbe_supported(dev))
894 		return -ENOTSUP;
895 
896 	return ixgbe_bypass_wd_timeout_store(dev, timeout);
897 }
898 
899 int
900 rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
901 {
902 	struct rte_eth_dev *dev;
903 
904 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
905 
906 	dev = &rte_eth_devices[port_id];
907 	if (!is_ixgbe_supported(dev))
908 		return -ENOTSUP;
909 
910 	return ixgbe_bypass_ver_show(dev, ver);
911 }
912 
913 int
914 rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
915 {
916 	struct rte_eth_dev *dev;
917 
918 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
919 
920 	dev = &rte_eth_devices[port_id];
921 	if (!is_ixgbe_supported(dev))
922 		return -ENOTSUP;
923 
924 	return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
925 }
926 
927 int
928 rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
929 {
930 	struct rte_eth_dev *dev;
931 
932 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
933 
934 	dev = &rte_eth_devices[port_id];
935 	if (!is_ixgbe_supported(dev))
936 		return -ENOTSUP;
937 
938 	return ixgbe_bypass_wd_reset(dev);
939 }
940 #endif
941 
942 /**
943  *  rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
944  *  @hw: pointer to hardware structure
945  *  @mask: Mask to specify which semaphore to acquire
946  *
947  *  Acquires the SWFW semaphore and gets the shared phy token as needed
948  */
949 STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
950 {
951 	int retries = FW_PHY_TOKEN_RETRIES;
952 	s32 status = IXGBE_SUCCESS;
953 
954 	while (--retries) {
955 		status = ixgbe_acquire_swfw_semaphore(hw, mask);
956 		if (status) {
957 			PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
958 				    status);
959 			return status;
960 		}
961 		status = ixgbe_get_phy_token(hw);
962 		if (status == IXGBE_SUCCESS)
963 			return IXGBE_SUCCESS;
964 
965 		if (status == IXGBE_ERR_TOKEN_RETRY)
966 			PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
967 				    status);
968 
969 		ixgbe_release_swfw_semaphore(hw, mask);
970 		if (status != IXGBE_ERR_TOKEN_RETRY) {
971 			PMD_DRV_LOG(ERR,
972 				    "Retry get PHY token failed, Status=%d\n",
973 				    status);
974 			return status;
975 		}
976 	}
977 	PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
978 		    hw->phy.id);
979 	return status;
980 }
981 
982 /**
983  *  rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
984  *  @hw: pointer to hardware structure
985  *  @mask: Mask to specify which semaphore to release
986  *
987  *  Releases the SWFW semaphore and puts the shared phy token as needed
988  */
989 STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
990 {
991 	ixgbe_put_phy_token(hw);
992 	ixgbe_release_swfw_semaphore(hw, mask);
993 }
994 
995 int
996 rte_pmd_ixgbe_mdio_lock(uint16_t port)
997 {
998 	struct ixgbe_hw *hw;
999 	struct rte_eth_dev *dev;
1000 	u32 swfw_mask;
1001 
1002 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1003 	dev = &rte_eth_devices[port];
1004 	if (!is_ixgbe_supported(dev))
1005 		return -ENOTSUP;
1006 
1007 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1008 	if (!hw)
1009 		return -ENOTSUP;
1010 
1011 	if (hw->bus.lan_id)
1012 		swfw_mask = IXGBE_GSSR_PHY1_SM;
1013 	else
1014 		swfw_mask = IXGBE_GSSR_PHY0_SM;
1015 
1016 	if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
1017 		return IXGBE_ERR_SWFW_SYNC;
1018 
1019 	return IXGBE_SUCCESS;
1020 }
1021 
1022 int
1023 rte_pmd_ixgbe_mdio_unlock(uint16_t port)
1024 {
1025 	struct rte_eth_dev *dev;
1026 	struct ixgbe_hw *hw;
1027 	u32 swfw_mask;
1028 
1029 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1030 
1031 	dev = &rte_eth_devices[port];
1032 	if (!is_ixgbe_supported(dev))
1033 		return -ENOTSUP;
1034 
1035 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1036 	if (!hw)
1037 		return -ENOTSUP;
1038 
1039 	if (hw->bus.lan_id)
1040 		swfw_mask = IXGBE_GSSR_PHY1_SM;
1041 	else
1042 		swfw_mask = IXGBE_GSSR_PHY0_SM;
1043 
1044 	rte_pmd_ixgbe_release_swfw(hw, swfw_mask);
1045 
1046 	return IXGBE_SUCCESS;
1047 }
1048 
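/* Perform a raw MDIO register read without taking the SWFW semaphore;
 * callers are expected to serialize access themselves, e.g. by holding
 * the lock taken with rte_pmd_ixgbe_mdio_lock().
 */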
1049 int
1050 rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
1051 				 uint32_t dev_type, uint16_t *phy_data)
1052 {
1053 	struct ixgbe_hw *hw;
1054 	struct rte_eth_dev *dev;
1055 	u32 i, data, command;
1056 
1057 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1058 	dev = &rte_eth_devices[port];
1059 	if (!is_ixgbe_supported(dev))
1060 		return -ENOTSUP;
1061 
1062 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063 	if (!hw)
1064 		return -ENOTSUP;
1065 
1066 	/* Setup and write the read command */
1067 	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
1068 		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
1069 		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
1070 		  IXGBE_MSCA_MDI_COMMAND;
1071 
1072 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
1073 
1074 	/* Check every 10 usec to see if the access completed.
1075 	 * The MDI Command bit will clear when the operation is
1076 	 * complete
1077 	 */
1078 	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1079 		usec_delay(10);
1080 
1081 		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
1082 		if (!(command & IXGBE_MSCA_MDI_COMMAND))
1083 			break;
1084 	}
1085 	if (command & IXGBE_MSCA_MDI_COMMAND)
1086 		return IXGBE_ERR_PHY;
1087 
1088 	/* Read operation is complete.  Get the data from MSRWD */
1089 	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
1090 	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
1091 	*phy_data = (u16)data;
1092 
1093 	return 0;
1094 }
1095 
1096 int
1097 rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
1098 				  uint32_t dev_type, uint16_t phy_data)
1099 {
1100 	struct ixgbe_hw *hw;
1101 	u32 i, command;
1102 	struct rte_eth_dev *dev;
1103 
1104 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1105 	dev = &rte_eth_devices[port];
1106 	if (!is_ixgbe_supported(dev))
1107 		return -ENOTSUP;
1108 
1109 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1110 	if (!hw)
1111 		return -ENOTSUP;
1112 
1113 	/* Put the data in the MDI single read and write data register */
1114 	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
1115 
1116 	/* Setup and write the write command */
1117 	command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
1118 		  (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
1119 		  IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
1120 		  IXGBE_MSCA_MDI_COMMAND;
1121 
1122 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
1123 
1124 	/* Check every 10 usec to see if the access completed.
1125 	 * The MDI Command bit will clear when the operation is
1126 	 * complete
1127 	 */
1128 	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1129 		usec_delay(10);
1130 
1131 		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
1132 		if (!(command & IXGBE_MSCA_MDI_COMMAND))
1133 			break;
1134 	}
1135 	if (command & IXGBE_MSCA_MDI_COMMAND) {
1136 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1137 			      "PHY write cmd didn't complete\n");
1138 		return IXGBE_ERR_PHY;
1139 	}
1140 	return 0;
1141 }
1142 
1143 int
1144 rte_pmd_ixgbe_get_fdir_info(uint16_t port, struct rte_eth_fdir_info *fdir_info)
1145 {
1146 	struct rte_eth_dev *dev;
1147 
1148 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1149 
1150 	dev = &rte_eth_devices[port];
1151 	if (!is_ixgbe_supported(dev))
1152 		return -ENOTSUP;
1153 
1154 	ixgbe_fdir_info_get(dev, fdir_info);
1155 
1156 	return 0;
1157 }
1158 
1159 int
1160 rte_pmd_ixgbe_get_fdir_stats(uint16_t port,
1161 			     struct rte_eth_fdir_stats *fdir_stats)
1162 {
1163 	struct rte_eth_dev *dev;
1164 
1165 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1166 
1167 	dev = &rte_eth_devices[port];
1168 	if (!is_ixgbe_supported(dev))
1169 		return -ENOTSUP;
1170 
1171 	ixgbe_fdir_stats_get(dev, fdir_stats);
1172 
1173 	return 0;
1174 }
1175