/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_bus_pci.h>

#include "base/ngbe.h"
#include "ngbe_ethdev.h"

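/*
 * Mailbox replies are one 32-bit word by default; the reply to
 * NGBE_VF_GET_QUEUES carries four extra words (Rx/Tx queue counts,
 * default queue and transparent VLAN), hence the larger size below.
 */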
#define NGBE_MAX_VFTA (128)
#define NGBE_VF_MSG_SIZE_DEFAULT 1
#define NGBE_VF_GET_QUEUE_MSG_SIZE 5

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* EM only supports 7 VFs. */
	return pci_dev->max_vfs;
}

static inline int
ngbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		rte_eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       RTE_ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ngbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_VFMBX;

	return 0;
}

int ngbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ngbe_vf_info **vfinfo = NGBE_DEV_VFDATA(eth_dev);
	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	uint16_t vf_num;
	uint8_t nb_queue = 1;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return ret;

	*vfinfo = rte_zmalloc("vf_info",
			sizeof(struct ngbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL) {
		PMD_INIT_LOG(ERR,
			"Cannot allocate memory for private VF data");
		return -ENOMEM;
	}

	ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"failed to allocate switch domain: %d", ret);
		rte_free(*vfinfo);
		*vfinfo = NULL;
		return ret;
	}

	memset(uta_info, 0, sizeof(struct ngbe_uta_info));
	hw->mac.mc_filter_type = 0;

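	/*
	 * One queue per pool in SR-IOV mode: pools 0..vf_num-1 belong to
	 * the VFs, so the PF's default queues start at vf_num * nb_queue.
	 */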
	RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_8_POOLS;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
			(uint16_t)(vf_num * nb_queue);

	ngbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.init_params(hw);

	/* set mb interrupt mask */
	ngbe_mb_intr_setup(eth_dev);

	return ret;
}

void ngbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ngbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = NGBE_DEV_VFDATA(eth_dev);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}

int ngbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	uint32_t gpie;
	uint32_t gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* set the default pool for PF */
	vtctl = rd32(hw, NGBE_POOLCTL);
	vtctl &= ~NGBE_POOLCTL_DEFPL_MASK;
	vtctl |= NGBE_POOLCTL_DEFPL(vf_num);
	vtctl |= NGBE_POOLCTL_RPLEN;
	wr32(hw, NGBE_POOLCTL, vtctl);

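	/*
	 * vf_num & VFRE_MASK is the bit position of the first PF pool
	 * within the 32-bit pool-enable registers; shifting ~0U left by
	 * it enables pools vf_num..31 (the PF's), while the VF pools
	 * below it stay disabled until each VF is reset.
	 */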
	vfre_offset = vf_num & VFRE_MASK;

	/* Enable pools reserved to PF only */
	wr32(hw, NGBE_POOLRXENA(0), (~0U) << vfre_offset);
	wr32(hw, NGBE_POOLTXENA(0), (~0U) << vfre_offset);

	wr32(hw, NGBE_PSRCTL, NGBE_PSRCTL_LBENA);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);

	/* clear VMDq map to scan rar 31 */
	wr32(hw, NGBE_ETHADDRIDX, hw->mac.num_rar_entries);
	wr32(hw, NGBE_ETHADDRASS, 0);

	/* set VMDq map to default PF pool */
	hw->mac.set_vmdq(hw, 0, vf_num);

	/*
	 * SW must set PORTCTL.VT_Mode the same as GPIE.VT_Mode
	 */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	gcr_ext = rd32(hw, NGBE_PORTCTL);
	gcr_ext &= ~NGBE_PORTCTL_NUMVT_MASK;

	if (RTE_ETH_DEV_SRIOV(eth_dev).active == RTE_ETH_8_POOLS)
		gcr_ext |= NGBE_PORTCTL_NUMVT_8;

	wr32(hw, NGBE_PORTCTL, gcr_ext);
	wr32(hw, NGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = rd32(hw, NGBE_VLANCTL);
	vlanctrl |= NGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, NGBE_VLANCTL, vlanctrl);

	/* enable all vlan filters */
	for (i = 0; i < NGBE_MAX_VFTA; i++)
		wr32(hw, NGBE_VLANTBL(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	wr32(hw, NGBE_FCWTRLO, 0);
	fcrth = rd32(hw, NGBE_PBRXSIZE) - 32;
	wr32(hw, NGBE_FCWTRHI, fcrth);

	return 0;
}

static void
ngbe_set_rx_mode(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *dev_data = eth_dev->data;
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	u32 fctrl, vmolr;
	uint16_t vfn = dev_num_vf(eth_dev);

	/* disable store-bad-packets */
	wr32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_SAVEBAD, 0);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32m(hw, NGBE_PSRCTL,
			~(NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP));
	fctrl |= NGBE_PSRCTL_BCA |
		 NGBE_PSRCTL_MCHFENA;

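	/*
	 * POOLETHCTL(vfn) is the PF's own pool: with vfn VFs, the PF
	 * default pool index equals vfn (see ngbe_pf_host_configure).
	 */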
	vmolr = rd32m(hw, NGBE_POOLETHCTL(vfn),
			~(NGBE_POOLETHCTL_UCP |
			  NGBE_POOLETHCTL_MCP |
			  NGBE_POOLETHCTL_UCHA |
			  NGBE_POOLETHCTL_MCHA));
	vmolr |= NGBE_POOLETHCTL_BCA |
		 NGBE_POOLETHCTL_UTA |
		 NGBE_POOLETHCTL_VLA;

	if (dev_data->promiscuous) {
		fctrl |= NGBE_PSRCTL_UCP |
			 NGBE_PSRCTL_MCP;
		/* the PF does not want packets routed to the VF, so
		 * UCP stays cleared; only enable multicast promiscuous
		 * on the pool
		 */
		vmolr |= NGBE_POOLETHCTL_MCP;
	} else if (dev_data->all_multicast) {
		fctrl |= NGBE_PSRCTL_MCP;
		vmolr |= NGBE_POOLETHCTL_MCP;
	} else {
		vmolr |= NGBE_POOLETHCTL_UCHA;
		vmolr |= NGBE_POOLETHCTL_MCHA;
	}

	wr32(hw, NGBE_POOLETHCTL(vfn), vmolr);

	wr32(hw, NGBE_PSRCTL, fctrl);

	ngbe_vlan_hw_strip_config(eth_dev);
}

static inline void
ngbe_vf_reset_event(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = rd32(hw, NGBE_POOLETHCTL(vf));

	vmolr |= (NGBE_POOLETHCTL_UCHA |
			NGBE_POOLETHCTL_BCA | NGBE_POOLETHCTL_UTA);
	wr32(hw, NGBE_POOLETHCTL(vf), vmolr);

	wr32(hw, NGBE_POOLTAG(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	ngbe_set_rx_mode(eth_dev);

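	/*
	 * VF MAC filters are allocated from the top of the RAR table
	 * down (entry num_rar_entries - (vf + 1)); drop this VF's
	 * entry on reset.
	 */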
	hw->mac.clear_rar(hw, rar_entry);
}

static inline void
ngbe_vf_reset_msg(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	uint32_t reg;
	uint32_t vf_shift;
	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;

	/* enable transmit for vf */
	reg = rd32(hw, NGBE_POOLTXENA(0));
	reg |= (1 << vf_shift);
	wr32(hw, NGBE_POOLTXENA(0), reg);

	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		ngbe_flush(hw);
		reg = 1 << (i % 32);
		wr32m(hw, NGBE_QPRXDROP, reg, reg);
	}

	/* enable receive for vf */
	reg = rd32(hw, NGBE_POOLRXENA(0));
	reg |= (1 << vf_shift);
	wr32(hw, NGBE_POOLRXENA(0), reg);

	ngbe_vf_reset_event(eth_dev, vf);
}

static int
ngbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	uint32_t vmolr;

	vmolr = rd32(hw, NGBE_POOLETHCTL(vf));

	PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous", vf);

	vmolr &= ~NGBE_POOLETHCTL_MCP;

	wr32(hw, NGBE_POOLETHCTL(vf), vmolr);

	return 0;
}

static int
ngbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ngbe_vf_reset_msg(eth_dev, vf);

	hw->mac.set_rar(hw, rar_entry, vf_mac, vf, true);

	/* Disable multicast promiscuous at reset */
	ngbe_disable_vf_mc_promisc(eth_dev, vf);

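	/*
	 * Reply layout (NGBE_VF_PERMADDR_MSG_LEN words): word 0 is the
	 * message header, words 1-2 carry the permanent MAC address and
	 * word 3 the multicast filter type.
	 */
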
	/* reply to reset with ack and vf mac address */
	msgbuf[0] = NGBE_VF_RESET | NGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ngbe_write_mbx(hw, msgbuf, NGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ngbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;

	if (rte_is_valid_assigned_ether_addr(ea)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   RTE_ETHER_ADDR_LEN);
		return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
	}
	return -1;
}

static int
ngbe_vf_set_multicast(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));
	int nb_entries = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >>
			NGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t NGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t NGBE_MTA_BIT_SHIFT = 5;
	const uint32_t NGBE_MTA_BIT_MASK = (0x1 << NGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = rd32(hw, NGBE_POOLETHCTL(vf));

	/* Disable multicast promiscuous first */
	ngbe_disable_vf_mc_promisc(eth_dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, NGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries in this VF's slot */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	if (nb_entries == 0) {
		vmolr &= ~NGBE_POOLETHCTL_MCHA;
		wr32(hw, NGBE_POOLETHCTL(vf), vmolr);
		return 0;
	}

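	/*
	 * Each 12-bit hash indexes the multicast table array: the upper
	 * 7 bits select one of 128 32-bit MCADDRTBL registers, the lower
	 * 5 bits the bit within it.
	 */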
	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> NGBE_MTA_BIT_SHIFT)
				& NGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & NGBE_MTA_BIT_MASK;
		reg_val = rd32(hw, NGBE_MCADDRTBL(mta_idx));
		reg_val |= (1 << mta_shift);
		wr32(hw, NGBE_MCADDRTBL(mta_idx), reg_val);
	}

	vmolr |= NGBE_POOLETHCTL_MCHA;
	wr32(hw, NGBE_POOLETHCTL(vf), vmolr);

	return 0;
}

static int
ngbe_vf_set_vlan(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));

	add = (msgbuf[0] & NGBE_VT_MSGINFO_MASK)
			>> NGBE_VT_MSGINFO_SHIFT;
	vid = NGBE_PSRVLAN_VID(msgbuf[1]);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.set_vfta(hw, vid, vf, (bool)add, false);
}

static int
ngbe_set_vf_lpe(struct rte_eth_dev *eth_dev,
		__rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	uint32_t max_frame = msgbuf[1];
	uint32_t max_frs;

	if (max_frame < RTE_ETHER_MIN_LEN ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

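	/*
	 * NGBE_FRMSZ is shared by all pools, so only ever raise it on a
	 * VF's behalf; shrinking it could break other pools' traffic.
	 */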
	max_frs = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
	if (max_frs < max_frame) {
		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
			NGBE_FRMSZ_MAX(max_frame));
	}

	return 0;
}

static int
ngbe_negotiate_vf_api(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(eth_dev);

	switch (api_version) {
	case ngbe_mbox_api_10:
	case ngbe_mbox_api_11:
	case ngbe_mbox_api_12:
	case ngbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	PMD_DRV_LOG(ERR, "VF %d requested invalid API version %u",
		vf, api_version);

	return -1;
}

static int
ngbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(eth_dev);
	uint32_t default_q = 0;

	/* Verify that the PF supports the mbox API version of the VF */
	switch (vfinfo[vf].api_version) {
	case ngbe_mbox_api_20:
	case ngbe_mbox_api_11:
	case ngbe_mbox_api_12:
	case ngbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[NGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
	msgbuf[NGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[NGBE_VF_DEF_QUEUE] = default_q;

	msgbuf[NGBE_VF_TRANS_VLAN] = 0;

	return 0;
}

static int
ngbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev,
		uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_vf_info *vfinfo = *(NGBE_DEV_VFDATA(eth_dev));
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	int xcast_mode = msgbuf[1]; /* msgbuf contains the mode to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case ngbe_mbox_api_12:
		/* promisc mode was introduced in API version 1.3 */
		if (xcast_mode == NGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		break;
	case ngbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

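	/*
	 * Map the requested xcast mode to POOLETHCTL bits; each mode
	 * admits strictly more traffic than the previous one (NONE <
	 * MULTI < ALLMULTI < PROMISC).
	 */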
	switch (xcast_mode) {
	case NGBEVF_XCAST_MODE_NONE:
		disable = NGBE_POOLETHCTL_BCA | NGBE_POOLETHCTL_MCHA |
			  NGBE_POOLETHCTL_MCP | NGBE_POOLETHCTL_UCP |
			  NGBE_POOLETHCTL_VLP;
		enable = 0;
		break;
	case NGBEVF_XCAST_MODE_MULTI:
		disable = NGBE_POOLETHCTL_MCP | NGBE_POOLETHCTL_UCP |
			  NGBE_POOLETHCTL_VLP;
		enable = NGBE_POOLETHCTL_BCA | NGBE_POOLETHCTL_MCHA;
		break;
	case NGBEVF_XCAST_MODE_ALLMULTI:
		disable = NGBE_POOLETHCTL_UCP | NGBE_POOLETHCTL_VLP;
		enable = NGBE_POOLETHCTL_BCA | NGBE_POOLETHCTL_MCHA |
			 NGBE_POOLETHCTL_MCP;
		break;
	case NGBEVF_XCAST_MODE_PROMISC:
		fctrl = rd32(hw, NGBE_PSRCTL);
		if (!(fctrl & NGBE_PSRCTL_UCP)) {
			/* VF promisc requires PF in promisc */
			PMD_DRV_LOG(ERR,
				"Enabling VF promisc requires PF in promisc");
			return -1;
		}

		disable = 0;
		enable = NGBE_POOLETHCTL_BCA | NGBE_POOLETHCTL_MCHA |
			 NGBE_POOLETHCTL_MCP | NGBE_POOLETHCTL_UCP |
			 NGBE_POOLETHCTL_VLP;
		break;
	default:
		return -1;
	}

	vmolr = rd32(hw, NGBE_POOLETHCTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(hw, NGBE_POOLETHCTL(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int
ngbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vf_info = *(NGBE_DEV_VFDATA(dev));
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
	int index = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >>
			NGBE_VT_MSGINFO_SHIFT;

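	/*
	 * A non-zero index in the message header adds new_mac as an
	 * extra MAC filter for the VF; index zero clears the VF's
	 * extra filter again.
	 */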
	if (index) {
		if (!rte_is_valid_assigned_ether_addr(ea)) {
			PMD_DRV_LOG(ERR, "VF %d: invalid MAC address", vf);
			return -1;
		}

		vf_info[vf].mac_count++;

		hw->mac.set_rar(hw, vf_info[vf].mac_count,
				new_mac, vf, true);
	} else {
		if (vf_info[vf].mac_count) {
			hw->mac.clear_rar(hw, vf_info[vf].mac_count);
			vf_info[vf].mac_count = 0;
		}
	}
	return 0;
}

static int
ngbe_rcv_msg_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	uint16_t mbx_size = NGBE_P2VMBX_SIZE;
	uint16_t msg_size = NGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[NGBE_P2VMBX_SIZE];
	int32_t retval;
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(eth_dev);
	struct ngbe_mb_event_param ret_param;

	retval = ngbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (NGBE_VT_MSGTYPE_ACK | NGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	ngbe_flush(hw);

	/*
	 * initialise the structure sent to the user application;
	 * it will return the user's response in the retval field
	 */
	ret_param.retval = NGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == NGBE_VF_RESET) {
		int ret = ngbe_vf_reset(eth_dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
				&ret_param);
		return ret;
	}

	/*
	 * Ask the user application whether we are allowed to perform
	 * these functions: if ret_param.retval is NGBE_MB_EVENT_PROCEED,
	 * business as usual; if it is 0, do nothing and send an ACK to
	 * the VF; any other value does nothing and sends a NACK to the VF.
	 */
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
			&ret_param);

	retval = ret_param.retval;

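	/*
	 * Only the low 16 bits of word 0 carry the opcode; the upper
	 * bits carry per-message flags (e.g. the NGBE_VT_MSGINFO field
	 * used by some handlers).
	 */
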
	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case NGBE_VF_SET_MAC_ADDR:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_vf_set_mac_addr(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_SET_MULTICAST:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_vf_set_multicast(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_SET_LPE:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_set_vf_lpe(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_SET_VLAN:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_vf_set_vlan(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_API_NEGOTIATE:
		retval = ngbe_negotiate_vf_api(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_GET_QUEUES:
		retval = ngbe_get_vf_queues(eth_dev, vf, msgbuf);
		msg_size = NGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case NGBE_VF_UPDATE_XCAST_MODE:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_set_vf_mc_promisc(eth_dev, vf, msgbuf);
		break;
	case NGBE_VF_SET_MACVLAN:
		if (retval == NGBE_MB_EVENT_PROCEED)
			retval = ngbe_set_vf_macvlan_msg(eth_dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (uint32_t)msgbuf[0]);
		retval = NGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= NGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= NGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= NGBE_VT_MSGTYPE_CTS;

	ngbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
ngbe_rcv_ack_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
{
	uint32_t msg = NGBE_VT_MSGTYPE_NACK;
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(eth_dev);

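	/*
	 * A NACK is the only reply here: it tells a VF that has not
	 * completed the reset handshake (clear_to_send unset) that it
	 * must reset before sending further requests.
	 */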
	if (!vfinfo[vf].clear_to_send)
		ngbe_write_mbx(hw, &msg, 1, vf);
}

void ngbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ngbe_check_for_rst(hw, vf))
			ngbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ngbe_check_for_msg(hw, vf))
			ngbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ngbe_check_for_ack(hw, vf))
			ngbe_rcv_ack_from_vf(eth_dev, vf);
	}
}