/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "ice_dcf.h"
#include "ice_rxtx.h"

#define ICE_DCF_AQ_LEN     32
#define ICE_DCF_AQ_BUF_SZ  4096

#define ICE_DCF_ARQ_MAX_RETRIES 200
#define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */

#define ICE_DCF_VF_RES_BUF_SZ	\
	(sizeof(struct virtchnl_vf_resource) +	\
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))

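/* Send a virtchnl request to the PF over the AdminQ send queue without
 * relying on the interrupt path; used by the init-time helpers below
 * before IRQ0 is enabled.
 */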
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}

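/* Poll the AdminQ receive ring for the response to @op, retrying up to
 * ICE_DCF_ARQ_MAX_RETRIES times at ICE_DCF_ARQ_CHECK_TIME ms intervals.
 * On success, returns the virtchnl status carried in cookie_low.
 */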
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}

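/* Remove a completed (or abandoned) command from the pending-command queue. */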
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

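/* Mark a command as pending and enqueue it so the interrupt handler can
 * match the PF's response against it.
 */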
static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

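/* Post the request message of a queued command to the PF. */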
static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}

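/* Dispatch one AdminQ receive event: deliver PF events to the registered
 * callback, or complete the matching pending command with the returned
 * status and response payload.
 */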
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (v_op == VIRTCHNL_OP_EVENT) {
		if (hw->vc_event_msg_cb != NULL)
			hw->vc_event_msg_cb(hw,
					    info->msg_buf,
					    info->msg_len);
		return;
	}

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

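/* Drain all pending AdminQ receive events and hand each one to
 * ice_dcf_aq_cmd_handle(); called from the interrupt handler.
 */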
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	while (pending) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}

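/* Negotiate the virtchnl API version with the PF and verify that it falls
 * inside the window this driver supports.
 */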
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
	struct virtchnl_version_info version, *pver;
	int err;

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;
	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)&version, sizeof(version));
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
		return err;
	}

	pver = &hw->virtchnl_version;
	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)pver, sizeof(*pver), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
		return -1;
	}

	PMD_INIT_LOG(DEBUG,
		     "Peer PF API version: %u.%u", pver->major, pver->minor);

	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
		PMD_INIT_LOG(ERR,
			     "VIRTCHNL API version should not be lower than (%u.%u)",
			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
		return -1;
	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
		PMD_INIT_LOG(ERR,
			     "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			     pver->major, pver->minor,
			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	PMD_INIT_LOG(DEBUG, "Peer is a supported PF host");

	return 0;
}

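/* Request the VF resources (capabilities, VSIs) from the PF and locate the
 * LAN VSI this DCF instance will use.
 */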
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	hw->vsi_res = NULL;
	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}

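/* Fetch the VF-to-VSI mapping table from the PF. Returns 0 if the cached
 * map was updated, 1 if the map is unchanged, and a negative errno on
 * failure.
 */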
static int
ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_dcf_vsi_map *vsi_map;
	uint32_t valid_msg_len;
	uint16_t len;
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
					  &len);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
		return err;
	}

	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
			sizeof(*vsi_map);
	if (len != valid_msg_len) {
		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
			    len);
		return -EINVAL;
	}

	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
		PMD_DRV_LOG(ERR, "The number of VSI map entries (%u) doesn't match the number of VFs (%u)",
			    vsi_map->num_vfs, hw->num_vfs);
		return -EINVAL;
	}

	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);

	if (!hw->vf_vsi_map) {
		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
		if (!hw->vf_vsi_map) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
			return -ENOMEM;
		}

		hw->num_vfs = vsi_map->num_vfs;
		hw->pf_vsi_id = vsi_map->pf_vsi;
	}

	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
		return 1;
	}

	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
	return 0;
}

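/* Ask the PF to turn off DCF mode for this VF. */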
static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to get response of OP_DCF_DISABLE %d",
			    err);
		return -1;
	}

	return 0;
}

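/* Poll VFGEN_RSTAT until the VF reset completes, giving up after
 * ICE_DCF_RESET_WAIT_CNT iterations of 20 ms each.
 */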
static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT       50
	struct iavf_hw *avf = &hw->avf;
	int i, reset;

	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
					IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;

		rte_delay_ms(20);
	}

	if (i >= ICE_DCF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

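/* Top-level IRQ0 handler: mask the interrupt, process queued virtchnl
 * messages, then re-enable it.
 */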
static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}

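/* Send one virtchnl command and busy-wait (with ms sleeps) for the
 * interrupt path to complete it. A minimal usage sketch, mirroring how
 * ice_dcf_query_stats() below fills struct dcf_virtchnl_cmd (illustrative
 * only, not an additional API):
 *
 *	struct dcf_virtchnl_cmd args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.v_op = VIRTCHNL_OP_GET_STATS;
 *	args.req_msg = (uint8_t *)&q_stats;
 *	args.req_msglen = sizeof(q_stats);
 *	args.rsp_msgbuf = (uint8_t *)pstats;
 *	args.rsp_buflen = sizeof(*pstats);
 *	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
 */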
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
		goto ret;
	}

	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}

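/* Forward a raw ice AdminQ descriptor (and optional data buffer) to the PF
 * as an OP_DCF_CMD_DESC/OP_DCF_CMD_BUFF pair, then wait for both halves to
 * complete. The response overwrites @desc and @buf in place.
 */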
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
		    void *buf, uint16_t buf_size)
{
	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
	struct ice_dcf_hw *hw = dcf_hw;
	int err = 0;
	int i = 0;

	if ((buf && !buf_size) || (!buf && buf_size) ||
	    buf_size > ICE_DCF_AQ_BUF_SZ)
		return -EINVAL;

	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
	desc_cmd.req_msglen = sizeof(*desc);
	desc_cmd.req_msg = (uint8_t *)desc;
	desc_cmd.rsp_buflen = sizeof(*desc);
	desc_cmd.rsp_msgbuf = (uint8_t *)desc;

	if (buf == NULL)
		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
	buff_cmd.req_msglen = buf_size;
	buff_cmd.req_msg = buf;
	buff_cmd.rsp_buflen = buf_size;
	buff_cmd.rsp_msgbuf = buf;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, &desc_cmd);
	ice_dcf_vc_cmd_set(hw, &buff_cmd);

	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
		err = -1;
		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
		goto ret;
	}

	do {
		if ((!desc_cmd.pending && !buff_cmd.pending) ||
		    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
		    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (desc: %d / buff: %d)",
			    i, desc_cmd.v_ret, buff_cmd.v_ret);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

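/* Re-read the VF resources and VSI map after the PF signals a VSI change;
 * interrupts are held off so the no-IRQ request/response helpers can be
 * reused safely.
 */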
int
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
	int err = 0;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);

	rte_intr_disable(&pci_dev->intr_handle);
	ice_dcf_disable_irq0(hw);

	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
		err = -1;

	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

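/* Query the bitmap of Rx descriptor IDs (RXDIDs) the PF supports. */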
static int
ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw,
					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  NULL, 0);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  (uint8_t *)&hw->supported_rxdid,
					  sizeof(uint64_t), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	return 0;
}

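/* One-time DCF hardware bring-up: map BAR0, initialize the AdminQ, check
 * the API version, fetch VF resources and the VSI map, allocate the RSS
 * buffers, and finally hook up the interrupt handler.
 */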
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
	hw->avf.back = hw;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
		ice_dcf_mode_disable(hw);
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		hw->rss_key = rte_zmalloc(NULL,
					  hw->vf_res->rss_key_size, 0);
		if (!hw->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_alloc;
		}
		hw->rss_lut = rte_zmalloc("rss_lut",
					  hw->vf_res->rss_lut_size, 0);
		if (!hw->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (ice_dcf_get_supported_rxdid(hw) != 0) {
			PMD_INIT_LOG(ERR, "failed to get supported RXDIDs");
			goto err_rss;
		}
	}

	hw->eth_dev = eth_dev;
	rte_intr_callback_register(&pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	return 0;

err_rss:
	rte_free(hw->rss_key);
	rte_free(hw->rss_lut);
err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);

	return -1;
}

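/* Tear down everything ice_dcf_init_hw() set up: interrupts, DCF mode,
 * the AdminQ, and all driver-allocated buffers.
 */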
void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	rte_free(hw->vf_vsi_map);
	rte_free(hw->vf_res);
	rte_free(hw->rss_lut);
	rte_free(hw->rss_key);
}

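/* Push the cached RSS hash key to the PF via OP_CONFIG_RSS_KEY. */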
static int
ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_key *rss_key;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
	rss_key = rte_zmalloc("rss_key", len, 0);
	if (!rss_key)
		return -ENOMEM;

	rss_key->vsi_id = hw->vsi_res->vsi_id;
	rss_key->key_len = hw->vf_res->rss_key_size;
	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_key;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	rte_free(rss_key);
	return err;
}

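/* Push the cached RSS lookup table to the PF via OP_CONFIG_RSS_LUT. */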
static int
ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_lut *rss_lut;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
	rss_lut = rte_zmalloc("rss_lut", len, 0);
	if (!rss_lut)
		return -ENOMEM;

	rss_lut->vsi_id = hw->vsi_res->vsi_id;
	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_lut;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");

	rte_free(rss_lut);
	return err;
}

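/* Initialize RSS: derive (or copy) the hash key, fill the LUT with a
 * round-robin spread over the configured Rx queues, and program both into
 * the PF.
 */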
int
ice_dcf_init_rss(struct ice_dcf_hw *hw)
{
	struct rte_eth_dev *dev = hw->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
		return ice_dcf_configure_rss_lut(hw);
	}

	/* In IAVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key)
		/* Calculate the default hash key */
		for (i = 0; i < hw->vf_res->rss_key_size; i++)
			hw->rss_key[i] = (uint8_t)rte_rand();
	else
		rte_memcpy(hw->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   hw->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		hw->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = ice_dcf_configure_rss_lut(hw);
	if (ret)
		return ret;
	ret = ice_dcf_configure_rss_key(hw);
	if (ret)
		return ret;

	return 0;
}

#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
#define IAVF_RXDID_COMMS_GENERIC 16

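/* Build a virtchnl_vsi_queue_config_info describing every Tx/Rx queue pair
 * (ring base, length, buffer sizes, RXDID) and send it to the PF with
 * OP_CONFIG_VSI_QUEUES.
 */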
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
	struct ice_tx_queue **txq =
		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_config;
	struct virtchnl_queue_pair_info *vc_qp;
	struct dcf_virtchnl_cmd args;
	uint16_t i, size;
	int err;

	size = sizeof(*vc_config) +
	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
	vc_config = rte_zmalloc("cfg_queue", size, 0);
	if (!vc_config)
		return -ENOMEM;

	vc_config->vsi_id = hw->vsi_res->vsi_id;
	vc_config->num_queue_pairs = hw->num_queue_pairs;

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;
		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;

		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;

		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
#else
		if (hw->vf_res->vf_cap_flags &
			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
			hw->supported_rxdid &
			BIT(IAVF_RXDID_LEGACY_0)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
					"Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
#endif
		ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)vc_config;
	args.req_msglen = size;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of"
			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	rte_free(vc_config);
	return err;
}

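/* Map each configured MSI-X vector to its Rx queues and program the
 * mapping into the PF with OP_CONFIG_IRQ_MAP.
 */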
int
ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct dcf_virtchnl_cmd args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

	map_info->num_vectors = hw->nb_msix;
	for (i = 0; i < hw->nb_msix; i++) {
		vecmap = &map_info->vecmap[i];
		vecmap->vsi_id = hw->vsi_res->vsi_id;
		vecmap->rxitr_idx = 0;
		vecmap->vector_id = hw->msix_base + i;
		vecmap->txq_map = 0;
		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.req_msg = (u8 *)map_info;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}

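/* Enable or disable a single Rx or Tx queue via OP_ENABLE_QUEUES /
 * OP_DISABLE_QUEUES.
 */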
int
ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;
	if (rx)
		queue_select.rx_queues |= 1 << qid;
	else
		queue_select.tx_queues |= 1 << qid;

	memset(&args, 0, sizeof(args));
	if (on)
		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
	else
		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;

	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");

	return err;
}

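/* Disable every configured Rx and Tx queue in one OP_DISABLE_QUEUES call. */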
int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;

	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_QUEUES");

	return err;
}

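/* Fetch VSI-wide packet/byte counters from the PF into @pstats. */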
int
ice_dcf_query_stats(struct ice_dcf_hw *hw,
		    struct virtchnl_eth_stats *pstats)
{
	struct virtchnl_queue_select q_stats;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = hw->vsi_res->vsi_id;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_GET_STATS;
	args.req_msg = (uint8_t *)&q_stats;
	args.req_msglen = sizeof(q_stats);
	args.rsp_msglen = sizeof(*pstats);
	args.rsp_msgbuf = (uint8_t *)pstats;
	args.rsp_buflen = sizeof(*pstats);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
		return err;
	}

	return 0;
}

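/* Add or remove the port's primary MAC address on the PF filter list. */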
int
ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct rte_ether_addr *addr;
	struct dcf_virtchnl_cmd args;
	int len, err = 0;

	len = sizeof(struct virtchnl_ether_addr_list);
	addr = hw->eth_dev->data->mac_addrs;
	len += sizeof(struct virtchnl_ether_addr);

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "fail to allocate memory");
		return -ENOMEM;
	}

	rte_memcpy(list->list[0].addr, addr->addr_bytes,
			sizeof(addr->addr_bytes));
	PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
			    addr->addr_bytes[0], addr->addr_bytes[1],
			    addr->addr_bytes[2], addr->addr_bytes[3],
			    addr->addr_bytes[4], addr->addr_bytes[5]);

	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = 1;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");
	rte_free(list);
	return err;
}