/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}


/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct rte_eth_dev *dev, struct hn_data *hv)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int port, ret;

	if (hv->vf_ctx.vf_attached) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return 0;
	}

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "Couldn't find port for VF");
		return port;
	}

	PMD_DRV_LOG(NOTICE, "found matching VF port %d", port);
	ret = rte_eth_dev_owner_get(port, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port);
	hv->vf_ctx.vf_attached = true;
	hv->vf_ctx.vf_port = port;
	return 0;
}

static void hn_vf_remove(struct hn_data *hv);

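/*
 * Alarm handler that tears down the VF port: switch the data path back
 * to the synthetic device, release ownership, unregister the removal
 * callback, then stop, close and remove the VF device.
 */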
static void hn_remove_delayed(void *args)
{
	struct hn_data *hv = args;
	uint16_t port_id = hv->vf_ctx.vf_port;
	struct rte_device *dev = rte_eth_devices[port_id].device;
	int ret;

	/* Tell VSP to switch data path to synthetic */
	hn_vf_remove(hv);

	PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
	rte_rwlock_write_lock(&hv->vf_lock);

	/* Give back ownership */
	ret = rte_eth_dev_owner_unset(port_id, hv->owner.id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_owner_unset failed ret=%d",
			    ret);
	hv->vf_ctx.vf_attached = false;

	ret = rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
					      hn_eth_rmv_event_callback, hv);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "rte_eth_dev_callback_unregister failed ret=%d",
			    ret);

	/* Detach and release port_id from system */
	ret = rte_eth_dev_stop(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d",
			    port_id, ret);

	/* Record the device parameters for possible hotplug events */
	if (dev->devargs && dev->devargs->args)
		hv->vf_devargs = strdup(dev->devargs->args);

	ret = rte_eth_dev_close(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d",
			    port_id, ret);

	ret = rte_dev_remove(dev);
	hv->vf_ctx.vf_state = vf_removed;

	rte_rwlock_write_unlock(&hv->vf_lock);
}

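/*
 * Handle the device removal interrupt (RTE_ETH_EVENT_INTR_RMV) for the VF.
 * The actual teardown is deferred to an alarm handler so that it does not
 * run in the event callback context.
 */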
int hn_eth_rmv_event_callback(uint16_t port_id,
			      enum rte_eth_event_type event __rte_unused,
			      void *cb_arg, void *out __rte_unused)
{
	struct hn_data *hv = cb_arg;

	PMD_DRV_LOG(NOTICE, "Removing VF portid %d", port_id);
	rte_eal_alarm_set(1, hn_remove_delayed, hv);

	return 0;
}

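/*
 * Mirror the synthetic device's queue configuration onto the VF port.
 * Used when the VF is hot-added after the synthetic device has already
 * been configured and started.
 */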
static int hn_setup_vf_queues(int port, struct rte_eth_dev *dev)
{
	struct hn_rx_queue *rx_queue;
	struct rte_eth_txq_info txinfo;
	struct rte_eth_rxq_info rxinfo;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = rte_eth_tx_queue_info_get(dev->data->port_id, i, &txinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		ret = rte_eth_tx_queue_setup(port, i, txinfo.nb_desc, 0,
					     &txinfo.conf);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_setup failed ret=%d",
				    ret);
			return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = rte_eth_rx_queue_info_get(dev->data->port_id, i, &rxinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		rx_queue = dev->data->rx_queues[i];

		ret = rte_eth_rx_queue_setup(port, i, rxinfo.nb_desc, 0,
					     &rxinfo.conf, rx_queue->mb_pool);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_setup failed ret=%d",
				    ret);
			return ret;
		}
	}

	return ret;
}

int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);

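/*
 * Alarm handler used to retry hn_vf_add() when the host has announced a
 * VF but the matching ethdev port has not been probed yet.
 */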
static void hn_vf_add_retry(void *args)
{
	struct rte_eth_dev *dev = args;
	struct hn_data *hv = dev->data->dev_private;

	hn_vf_add(dev, hv);
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int ret, port;

	if (!hv->vf_ctx.vf_vsp_reported || hv->vf_ctx.vf_vsc_switched)
		return 0;

	rte_rwlock_write_lock(&hv->vf_lock);

	ret = hn_vf_attach(dev, hv);
	if (ret) {
		PMD_DRV_LOG(NOTICE,
			    "RNDIS reports VF but device not found, retrying");
		rte_eal_alarm_set(1000000, hn_vf_add_retry, dev);
		goto exit;
	}

	port = hv->vf_ctx.vf_port;

	/* If the primary device has started, this is a VF hot add.
	 * Configure and start VF device.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_devices[port].data->dev_started) {
			PMD_DRV_LOG(ERR, "VF already started on hot add");
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "configuring VF port %d", port);
		ret = hn_vf_configure(dev, &dev->data->dev_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure VF port %d",
				    port);
			goto exit;
		}

		ret = hn_setup_vf_queues(port, dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to configure VF queues port %d",
				    port);
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "Starting VF port %d", port);
		ret = rte_eth_dev_start(port);
		if (ret) {
			PMD_DRV_LOG(ERR, "rte_eth_dev_start failed ret=%d",
				    ret);
			goto exit;
		}
		hv->vf_ctx.vf_state = vf_started;
	}

	ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	if (ret == 0)
		hv->vf_ctx.vf_vsc_switched = true;

exit:
	rte_rwlock_write_unlock(&hv->vf_lock);
	return ret;
}

/* Switch data path back to the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	int ret;

	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
		return;
	}

	rte_rwlock_write_lock(&hv->vf_lock);
	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
		if (ret == 0)
			hv->vf_ctx.vf_vsc_switched = false;
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_ctx.vf_vsp_reported = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max, lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic paths.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

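/* Merge the VF device info into the synthetic device info, if a VF is present */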
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	/* need to monitor removal event */
	vf_conf.intr_conf.rmv = 1;

	if (hv->vf_ctx.vf_attached) {
		ret = rte_eth_dev_callback_register(hv->vf_ctx.vf_port,
						    RTE_ETH_EVENT_INTR_RMV,
						    hn_eth_rmv_event_callback,
						    hv);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Registering callback failed for vf port %d ret %d",
				    hv->vf_ctx.vf_port, ret);
			return ret;
		}

		ret = rte_eth_dev_configure(hv->vf_ctx.vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "VF configuration failed: %d", ret);

			rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
							RTE_ETH_EVENT_INTR_RMV,
							hn_eth_rmv_event_callback,
							hv);

			return ret;
		}

		hv->vf_ctx.vf_state = vf_configured;
	}

	return ret;
}

/* Configure the VF if present.
 * The VF device will have the same number of queues as the synthetic device.
 */
int hn_vf_configure_locked(struct rte_eth_dev *dev,
			   const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_rwlock_write_lock(&hv->vf_lock);
	ret = hn_vf_configure(dev, dev_conf);
	rte_rwlock_write_unlock(&hv->vf_lock);

	return ret;
}

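/* Return the packet types supported by the VF device, if one is attached */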
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

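/* Start the VF device, if one is attached */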
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		ret = rte_eth_dev_stop(vf_dev->data->port_id);
		if (ret != 0)
			PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
				    vf_dev->data->port_id);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade the call down and return its status */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}

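/*
 * For illustration, a wrapper such as hn_vf_stats_reset() below expands
 * (roughly) to:
 *
 *	struct hn_data *hv = dev->data->dev_private;
 *	struct rte_eth_dev *vf_dev;
 *	int ret = 0;
 *
 *	rte_rwlock_read_lock(&hv->vf_lock);
 *	vf_dev = hn_get_vf_dev(hv);
 *	if (vf_dev)
 *		ret = rte_eth_stats_reset(vf_dev->data->port_id);
 *	rte_rwlock_read_unlock(&hv->vf_lock);
 *	return ret;
 */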
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

int hn_vf_close(struct rte_eth_dev *dev)
{
	int ret = 0;
	struct hn_data *hv = dev->data->dev_private;

	rte_eal_alarm_cancel(hn_vf_add_retry, dev);

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_ctx.vf_attached) {
		rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
						RTE_ETH_EVENT_INTR_RMV,
						hn_eth_rmv_event_callback,
						hv);
		rte_eal_alarm_cancel(hn_remove_delayed, hv);
		ret = rte_eth_dev_close(hv->vf_ctx.vf_port);
		hv->vf_ctx.vf_attached = false;
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release)
		(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);

	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release)
		(*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

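/*
 * Get the VF extended statistics names; each name is prefixed with
 * "vf_" to distinguish it from the synthetic device's own counters.
 */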
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

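/*
 * Get the VF extended statistics. The VF entries are written starting at
 * 'offset' in the xstats array and their ids are shifted by the same
 * offset, leaving the first 'offset' slots for the synthetic device.
 */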
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset the IDs of the VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}