xref: /f-stack/dpdk/drivers/net/netvsc/hn_vf.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}
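
/*
 * Note: on Hyper-V, accelerated networking gives the VF the same MAC
 * address as the paired synthetic NIC, which is why matching by MAC
 * address above is sufficient to pair the two devices.
 */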


/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	err = hn_vf_attach(hv, port);
	if (err == 0)
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);

	return err;
}
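
/*
 * Note: once the VF is attached, the hn_nvs_set_datapath(hv, NVS_DATAPATH_VF)
 * call above asks the host to steer data traffic through the VF;
 * hn_vf_remove() below switches the datapath back to NVS_DATAPATH_SYNTHETIC
 * before giving up ownership of the VF port.
 */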

/* Remove VF device */
static void hn_vf_remove(struct hn_data *hv)
{
	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Give back ownership */
		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
	}
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	rte_rwlock_write_lock(&hv->vf_lock);
	hv->vf_present = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}
static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}
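
/*
 * Illustrative example (values are hypothetical): if the synthetic path
 * allows at most 4096 descriptors per ring and the VF allows 2048, the
 * merged nb_max is 2048; minimums and alignment take the larger (stricter)
 * of the two values.
 */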

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
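/*
 * Note: offload and RSS capability flags are intersected (bitwise AND)
 * below, since a feature can only be advertised when both the synthetic
 * path and the VF support it.
 */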
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT) {
		ret = rte_eth_dev_configure(hv->vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret != 0)
			PMD_DRV_LOG(ERR,
				    "VF configuration failed: %d", ret);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		ret = rte_eth_dev_stop(vf_dev->data->port_id);
		if (ret != 0)
			PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
				    vf_dev->data->port_id);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}
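
/*
 * For example, hn_vf_stats_reset() below expands
 * VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset): under vf_lock it
 * calls rte_eth_stats_reset() on the attached VF port and returns its
 * status, or returns 0 when no VF is attached.
 */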

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

int hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		ret = rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
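	/* e.g. a VF counter named "rx_errors" (name is illustrative) is
	 * reported by the synthetic device as "vf_rx_errors"
	 */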
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset ids for VF stats */
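	/*
	 * The VF entries were written at xstats[offset..offset + count - 1]
	 * above; shifting their ids by the same offset keeps them consistent
	 * with the "vf_" prefixed names from hn_vf_xstats_get_names()
	 * (offset is expected to be the number of synthetic xstats the
	 * caller has already filled in).
	 */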
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}