/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

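/*
 * Rx/Tx burst handlers for the representor port. The representor has no
 * datapath of its own: its queues alias PF-owned WQ/RQ resources, so these
 * wrappers simply delegate to the regular enic burst functions.
 */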
static uint16_t enic_vf_recv_pkts(void *rx_queue,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

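/*
 * The representor supports a single Tx queue, backed by a dedicated WQ on
 * the PF vNIC (index pf_wq_idx, reserved during enic_vf_representor_init).
 */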
static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

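/*
 * The representor likewise supports a single Rx queue. It is backed by the
 * start-of-packet RQ (pf_rq_sop_idx) reserved on the PF vNIC; the paired
 * data RQ is brought up in dev_start when it is in use.
 */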
static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

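/*
 * Conceptually, the two implicit rules installed below amount to the
 * following (illustrative sketch only; the driver programs them directly
 * via flowman devcmds, and the exact match/action encoding is internal to
 * the flowman code):
 *
 *   rule 1: packets from the representor's WQ -> forward to VF
 *   rule 2: packets from the VF               -> forward to representor
 *
 * User-created flows on the VF take precedence over rule 2.
 */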
static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}

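/*
 * Bring up the representor datapath: initialize the flowman instance,
 * install the implicit forwarding rules, clear the VF's packet filters,
 * then initialize and enable the PF-owned WQ/RQ/CQ resources reserved for
 * this representor. The queue bring-up mirrors what
 * enic_init_vnic_resources and enic_enable do for the PF's own queues.
 */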
static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to VF.
	 * When PF enables switchdev, it will ensure packet filters
	 * are removed. So, this is not technically needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	/* enic_start_wq */
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);
	/* enic_enable */
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	/* Tx queue state was already set above when the WQ was enabled */
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

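/*
 * dev_stop reverses dev_start: the representor's WQ and RQs are disabled
 * and drained, the associated CQs cleaned, and the representor's flowman
 * instance destroyed.
 */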
static int enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);

	return 0;
}

/*
 * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	return 0;
}

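/*
 * The flow API is implemented by rewriting attributes and handing off to
 * the flowman ops. An application-visible "ingress" rule on the representor
 * (packets arriving from the VF) is, from the firmware's point of view, an
 * egress rule on the VF vNIC, hence the ingress/egress swap below.
 */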
static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress as the firmware view of direction
	 * is the opposite of the representor.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

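/*
 * For reference, a minimal application-side sketch of creating an ingress
 * flow on a representor port (standard rte_flow API; "port_id", the
 * pattern, and the queue index are illustrative):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 *
 * The wrappers below validate/adjust the attributes and then delegate to
 * enic_fm_flow_ops.
 */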
static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}

static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

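/*
 * rte_flow calls reach the PMD through the generic filter control op: the
 * ethdev layer queries RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET to obtain
 * the rte_flow_ops table above. Flowman support is required, as the
 * representor path is built on it.
 */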
static int
enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct enic_vf_representor *vf;
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
			*(const void **)arg = &enic_vf_flow_ops;
		} else {
			ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
			ret = -EINVAL;
		}
		break;
	default:
		ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are the same as the PF's. Update the PF
	 * status and then copy it to the VF.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}

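/*
 * Stats are read from the VF's own vNIC: vf->enic.vdev is a proxy handle
 * (see enic_vf_representor_init), so the dump/clear devcmds below are
 * executed by firmware on behalf of the VF.
 */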
static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err)
		ENICPMD_LOG(ERR, "error in clearing stats");
	return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is the same as the PF's */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
			       1 /* bcast */, vf->promisc, vf->allmulti);
}

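/*
 * The promiscuous/allmulticast handlers below only record the requested
 * state and re-apply the packet filter. In switchdev mode the filter is
 * ignored (see set_vf_packet_filter), so they are effectively bookkeeping.
 */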
static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure = enic_vf_dev_configure,
	.dev_infos_get = enic_vf_dev_infos_get,
	.dev_start = enic_vf_dev_start,
	.dev_stop = enic_vf_dev_stop,
	.dev_close = enic_vf_dev_close,
	.filter_ctrl = enic_vf_filter_ctrl,
	.link_update = enic_vf_link_update,
	.promiscuous_enable = enic_vf_promiscuous_enable,
	.promiscuous_disable = enic_vf_promiscuous_disable,
	.stats_get = enic_vf_stats_get,
	.stats_reset = enic_vf_stats_reset,
	.rx_queue_setup = enic_vf_dev_rx_queue_setup,
	.rx_queue_release = enic_vf_dev_rx_queue_release,
	.tx_queue_setup = enic_vf_dev_tx_queue_setup,
	.tx_queue_release = enic_vf_dev_tx_queue_release,
};

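/*
 * Query the VF vNIC's MAC address and MTU through proxy devcmds and copy
 * them into the representor's ethdev data, so the representor reports the
 * VF's addressing and framing limits rather than the PF's.
 */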
static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on the VF. That driver automatically adjusts its
	 * MTU according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
	return 0;
}

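/*
 * Per-representor initialization, invoked with per-VF parameters from the
 * PF's probe path. Reserves PF-owned WQ/RQ/CQ indexes for the representor
 * datapath, registers a proxy vdev for devcmds targeting the VF, and fills
 * in ethdev state (ops, MAC, link, switch info).
 */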
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of the index number calculations used
	 * throughout the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		    vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx,
		    vf->pf_rq_data_idx, vf->pf_wq_cq_idx,
		    enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
				    RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate the VF BDF. The firmware ensures that the PF BDF is
	 * always bus:dev.0, and the VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}