/* xref: /f-stack/dpdk/drivers/net/e1000/igb_pf.c (revision 4418919f) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/e1000_defines.h"
#include "base/e1000_regs.h"
#include "base/e1000_hw.h"
#include "e1000_ethdev.h"

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}

static inline int
igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
	struct e1000_vf_info *vfinfo =
		*E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		rte_eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
				RTE_ETHER_ADDR_LEN);
	}

	return 0;
}

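/*
 * Note: rte_eth_random_addr() yields a unicast, locally administered
 * address (multicast bit clear, local-admin bit set), so the generated
 * VF MACs will not clash with vendor-assigned globally unique MACs.
 * Each address stays in vf_mac_addresses as the VF's "permanent" MAC
 * until the guest replaces it with an E1000_VF_SET_MAC_ADDR mailbox
 * request (see igb_vf_set_mac_addr() below).
 */
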
static inline int
igb_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= E1000_ICR_VMMB;

	return 0;
}

void igb_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct e1000_vf_info **vfinfo =
		E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	if (hw->mac.type == e1000_i350)
		nb_queue = 1;
	else if (hw->mac.type == e1000_82576)
		/* per datasheet, it should be 2, but 1 seems correct */
		nb_queue = 1;
	else
		return;

	*vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

	igb_vf_perm_addr_gen(eth_dev, vf_num);

	/* set mailbox interrupt mask */
	igb_mb_intr_setup(eth_dev);
}

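/*
 * For reference: pci_dev->max_vfs (read by dev_num_vf() above) is
 * filled in by the EAL PCI scan, so the VFs must be created before the
 * PF port is probed, e.g. (illustrative only, the exact sysfs knob
 * depends on the bound kernel driver):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * With vf_num VFs, pools 0..vf_num-1 belong to the VFs; the PF takes
 * def_vmdq_idx == vf_num, so its first queue index is vf_num * nb_queue.
 */
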
void igb_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct e1000_vf_info **vfinfo;
	uint16_t vf_num;

	PMD_INIT_FUNC_TRACE();

	vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	RTE_ETH_DEV_SRIOV(dev).active = 0;
	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(dev);
	if (vf_num == 0)
		return;

	rte_free(*vfinfo);
	*vfinfo = NULL;
}

#define E1000_RAH_POOLSEL_SHIFT    (18)
int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl;
	uint16_t vf_num;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t vlanctrl;
	int i;
	uint32_t rah;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
	vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< E1000_VT_CTL_DEFAULT_POOL_SHIFT;
	vtctl |= E1000_VT_CTL_VM_REPL_EN;
	E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);

	/* enable only the pools reserved for the PF */
	E1000_WRITE_REG(hw, E1000_VFRE, (~0U) << vf_num);
	E1000_WRITE_REG(hw, E1000_VFTE, (~0U) << vf_num);

	/* PF DMA Tx General Switch Control: enable VMDq loopback */
	if (hw->mac.type == e1000_i350)
		E1000_WRITE_REG(hw, E1000_TXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
	else
		E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);

	/* clear the VMDq pool map of permanent RAR 0 */
	rah = E1000_READ_REG(hw, E1000_RAH(0));
	rah &= ~(0xFF << E1000_RAH_POOLSEL_SHIFT);
	E1000_WRITE_REG(hw, E1000_RAH(0), rah);

	/* clear the VMDq pool map of scan RAR 32 */
	rah = E1000_READ_REG(hw, E1000_RAH(hw->mac.rar_entry_count));
	rah &= ~(0xFF << E1000_RAH_POOLSEL_SHIFT);
	E1000_WRITE_REG(hw, E1000_RAH(hw->mac.rar_entry_count), rah);

	/* map RAR 0 to the default PF pool */
	rah = E1000_READ_REG(hw, E1000_RAH(0));
	rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx +
			E1000_RAH_POOLSEL_SHIFT));
	E1000_WRITE_REG(hw, E1000_RAH(0), rah);

	/* enable VLAN filtering and allow all VLAN tags through */
	vlanctrl = E1000_READ_REG(hw, E1000_RCTL);
	vlanctrl |= E1000_RCTL_VFE; /* enable vlan filters */
	E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, 0xFFFFFFFF);

	/* enable/disable MAC anti-spoofing (disabled here) */
	e1000_vmdq_set_anti_spoofing_pf(hw, FALSE, vf_num);

	return 0;
}

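/*
 * VFRE/VFTE hold one enable bit per pool. Writing (~0U) << vf_num
 * clears the bits of all VF pools and sets the rest; e.g. with
 * vf_num == 4 the written value is 0xFFFFFFF0, so only pools 4..7
 * (the PF's side of the 8-pool VMDq split) pass traffic until each
 * VF re-enables its own bit through the reset handshake in
 * igb_vf_reset_msg() below.
 */
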
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = E1000_READ_REG(hw, E1000_RCTL);

	/* set all bits that we expect to always be set */
	fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */
	fctrl |= E1000_RCTL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (dev_data->all_multicast) {
			fctrl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			vmolr |= E1000_VMOLR_ROMPE;
		}
	}

	if ((hw->mac.type == e1000_82576) ||
		(hw->mac.type == e1000_i350)) {
		vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
			 ~(E1000_VMOLR_MPME | E1000_VMOLR_ROMPE |
			   E1000_VMOLR_ROPE);
		E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
	}

	E1000_WRITE_REG(hw, E1000_RCTL, fctrl);
}

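/*
 * Rough meaning of the VMOLR bits used above, per the 82576/i350
 * datasheets: BAM accepts broadcast, AUPE accepts untagged packets,
 * ROPE/ROMPE accept packets matching the unicast/multicast hash
 * tables, and MPME is per-pool multicast promiscuous mode. Note that
 * VMOLR(vfn) with vfn == dev_num_vf(dev) is the PF's own pool (its
 * default pool index, see igb_pf_host_init() above), not a VF pool.
 */
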
static inline void
igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_info *vfinfo =
		*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));

	vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE |
			E1000_VMOLR_BAM | E1000_VMOLR_AUPE);
	E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);

	E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	set_rx_mode(dev);
}

static inline void
igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* enable transmit and receive for vf */
	reg = E1000_READ_REG(hw, E1000_VFTE);
	reg |= 1 << vf;
	E1000_WRITE_REG(hw, E1000_VFTE, reg);

	reg = E1000_READ_REG(hw, E1000_VFRE);
	reg |= 1 << vf;
	E1000_WRITE_REG(hw, E1000_VFRE, reg);

	igb_vf_reset_event(dev, vf);
}

static int
igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_info *vfinfo =
		*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	uint32_t rah;

	igb_vf_reset_msg(dev, vf);

	hw->mac.ops.rar_set(hw, vf_mac, rar_entry);
	rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
	rah |= (0x1 << (vf + E1000_RAH_POOLSEL_SHIFT));
	E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	e1000_write_mbx(hw, msgbuf, 3, vf);

	return 0;
}

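/*
 * VF receive-address slots are carved from the top of the RAR table:
 * rar_entry_count - 1 for VF 0, rar_entry_count - 2 for VF 1, and so
 * on, while RAR 0 stays the PF's permanent address. The pool-select
 * bit (vf + E1000_RAH_POOLSEL_SHIFT) steers frames matching that RAR
 * into the VF's pool. The 3-word reply is: msgbuf[0] = command + ACK
 * flag, msgbuf[1..2] = the 6-byte MAC the VF should use.
 */
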
static int
igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_info *vfinfo =
		*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	uint32_t rah;

	if (rte_is_unicast_ether_addr((struct rte_ether_addr *)new_mac)) {
		if (!rte_is_zero_ether_addr((struct rte_ether_addr *)new_mac))
			rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
				sizeof(vfinfo[vf].vf_mac_addresses));
		hw->mac.ops.rar_set(hw, new_mac, rar_entry);
		rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
		rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf));
		E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
		return 0;
	}
	return -1;
}

static int
igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
	int i;
	uint32_t vector_bit;
	uint32_t vector_reg;
	uint32_t mta_reg;
	int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >>
		E1000_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_info *vfinfo =
		*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	/* only so many hash values are supported */
	entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES);

	/*
	 * salt away the number of multicast addresses assigned
	 * to this VF for later use, to restore them when the PF
	 * multicast list changes
	 */
	vfinfo->num_vf_mc_hashes = (uint16_t)entries;

	/*
	 * VFs are limited to using the MTA hash table for their multicast
	 * addresses
	 */
	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
		mta_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, vector_reg);
		mta_reg |= (1 << vector_bit);
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, vector_reg, mta_reg);
	}

	return 0;
}

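/*
 * MTA update, worked through: each 12-bit hash value selects one bit
 * in the 128-entry x 32-bit Multicast Table Array. For a hash of
 * 0x634, for instance, vector_reg = (0x634 >> 5) & 0x7F = 0x31 and
 * vector_bit = 0x634 & 0x1F = 0x14, so bit 20 of MTA[49] gets set.
 * Bits are only ever OR-ed in here; clearing stale entries would
 * require rewriting the MTA elsewhere.
 */
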
static int
igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_info *vfinfo =
		*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	uint32_t vid_idx, vid_bit, vfta;

	add = (msgbuf[0] & E1000_VT_MSGINFO_MASK)
		>> E1000_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;

	vid_idx = (uint32_t)((vid >> E1000_VFTA_ENTRY_SHIFT) &
			     E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t)(1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (add)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;

	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

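/*
 * VFTA indexing example: the 4096 possible VLAN IDs map onto 128
 * 32-bit VFTA words. For vid 100: vid_idx = 100 >> 5 = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VFTA[3].
 * The msginfo field of msgbuf[0] doubles as the add/remove flag here.
 */
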
static int
igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;
	uint32_t max_frame = rlpml + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	uint32_t vmolr;

	if (max_frame < RTE_ETHER_MIN_LEN ||
			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));

	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= rlpml;

	/* Enable Long Packet support */
	vmolr |= E1000_VMOLR_LPE;

	E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

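/*
 * RLPML is the per-pool Rx long packet maximum length field in VMOLR.
 * The sanity check re-adds the Ethernet overhead the VF's request
 * excludes: e.g. a requested rlpml of 1500 gives max_frame =
 * 1500 + 14 + 4 = 1518 bytes (RTE_ETHER_MAX_LEN), which passes;
 * anything above RTE_ETHER_MAX_JUMBO_FRAME_LEN is refused, and the
 * caller then NACKs the request.
 */
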
static int
igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = E1000_VFMAILBOX_SIZE;
	uint32_t msgbuf[E1000_VFMAILBOX_SIZE];
	int32_t retval;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	E1000_WRITE_FLUSH(hw);

	/* perform VF reset */
	if (msgbuf[0] == E1000_VF_RESET)
		return igb_vf_reset(dev, vf, msgbuf);

	/* check & process VF to PF mailbox message */
	switch (msgbuf[0] & 0xFFFF) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_vf_set_multicast(dev, vf, msgbuf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_vf_set_rlpml(dev, vf, msgbuf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_vf_set_vlan(dev, vf, msgbuf);
		break;
	default:
		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
			     (unsigned) msgbuf[0]);
		retval = E1000_ERR_MBX;
		break;
	}

	/* answer the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	e1000_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}

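/*
 * Mailbox word layout, as used above: the low 16 bits of msgbuf[0]
 * carry the command (E1000_VF_RESET, E1000_VF_SET_MAC_ADDR, ...), the
 * E1000_VT_MSGINFO field carries a small argument (e.g. the multicast
 * entry count or the VLAN add/remove flag), and the high-order bits
 * are the ACK/NACK/CTS handshake flags that the PF ORs into its
 * one-word reply.
 */
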
static inline void
igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	/* answer the VF's ack with a NACK */
	uint32_t msg = E1000_VT_MSGTYPE_NACK;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	e1000_write_mbx(hw, &msg, 1, vf);
}

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!e1000_check_for_rst(hw, vf))
			igb_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!e1000_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!e1000_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(eth_dev, vf);
	}
}
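
/*
 * The e1000_check_for_* helpers return 0 when the corresponding event
 * is pending, hence the negated tests. This poll loop is the PF-side
 * entry point for all of the VF handling above; it is expected to run
 * from the device's interrupt handler once the E1000_ICR_VMMB (VM
 * mailbox) cause unmasked in igb_mb_intr_setup() fires.
 */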