/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "ice_dcf.h"
#include "ice_rxtx.h"

#define ICE_DCF_AQ_LEN     32
#define ICE_DCF_AQ_BUF_SZ  4096

#define ICE_DCF_ARQ_MAX_RETRIES 200
#define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */

#define ICE_DCF_VF_RES_BUF_SZ	\
	(sizeof(struct virtchnl_vf_resource) +	\
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))

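/*
 * Send a virtchnl request to the PF over the AdminQ without relying on
 * the interrupt path; used around init/reset when IRQ0 is not armed.
 */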
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}

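/*
 * Poll the AdminQ receive ring for the response to @op, retrying for up
 * to ICE_DCF_ARQ_MAX_RETRIES * ICE_DCF_ARQ_CHECK_TIME msecs. On success,
 * the virtchnl status carried in the descriptor's cookie_low is
 * returned; -EIO is returned on timeout.
 */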
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}

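/* Remove a completed (or abandoned) command from the pending queue. */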
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

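/*
 * Mark a command as pending and enqueue it so the interrupt path can
 * match the PF's reply against it by virtchnl opcode.
 */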
static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}

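/*
 * Demultiplex one AdminQ event: VIRTCHNL_OP_EVENT messages go to the
 * registered event callback; any other opcode completes the matching
 * pending command, copying the response payload into its buffer.
 */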
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (v_op == VIRTCHNL_OP_EVENT) {
		if (hw->vc_event_msg_cb != NULL)
			hw->vc_event_msg_cb(hw,
					    info->msg_buf,
					    info->msg_len);
		return;
	}

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

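/* Drain all pending AdminQ events; called from the interrupt handler. */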
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	while (pending && !hw->resetting) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}

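/*
 * Negotiate the virtchnl API version with the PF and verify that it
 * falls inside the window this driver supports.
 */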
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
	struct virtchnl_version_info version, *pver;
	int err;

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;
	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)&version, sizeof(version));
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
		return err;
	}

	pver = &hw->virtchnl_version;
	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)pver, sizeof(*pver), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
		return -1;
	}

	PMD_INIT_LOG(DEBUG,
		     "Peer PF API version: %u.%u", pver->major, pver->minor);

	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
		PMD_INIT_LOG(ERR,
			     "VIRTCHNL API version should not be lower than (%u.%u)",
			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
		return -1;
	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
		PMD_INIT_LOG(ERR,
			     "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			     pver->major, pver->minor,
			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	PMD_INIT_LOG(DEBUG, "Peer is a supported PF host");

	return 0;
}

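/*
 * Request the VF resources with the DCF capability flags set, then
 * locate the LAN (SR-IOV) VSI among the returned VSI resources.
 */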
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
	       VIRTCHNL_VF_OFFLOAD_QOS;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	hw->vsi_res = NULL;
	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}

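/*
 * Fetch the VF-to-VSI mapping from the PF. Returns 0 when the cached
 * map was updated, 1 when the map is unchanged and a negative errno on
 * failure.
 */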
static int
ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_dcf_vsi_map *vsi_map;
	uint32_t valid_msg_len;
	uint16_t len;
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
					  &len);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
		return err;
	}

	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
			sizeof(*vsi_map);
	if (len != valid_msg_len) {
		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
			    len);
		return -EINVAL;
	}

	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
		PMD_DRV_LOG(ERR, "The number of VSI map entries (%u) doesn't match the number of VFs (%u)",
			    vsi_map->num_vfs, hw->num_vfs);
		return -EINVAL;
	}

	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);

	if (!hw->vf_vsi_map) {
		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
		if (!hw->vf_vsi_map) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
			return -ENOMEM;
		}

		hw->num_vfs = vsi_map->num_vfs;
		hw->pf_vsi_id = vsi_map->pf_vsi;
	}

	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
		return 1;
	}

	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
	return 0;
}

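/* Ask the PF to take this VF out of DCF mode. */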
static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
	int err;

	if (hw->resetting)
		return 0;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to get response of OP_DCF_DISABLE %d",
			    err);
		return -1;
	}

	return 0;
}

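/*
 * Poll VFGEN_RSTAT until the VF reset has completed; gives up after
 * ICE_DCF_RESET_WAIT_CNT iterations of 20 ms each.
 */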
static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT       50
	struct iavf_hw *avf = &hw->avf;
	int i, reset;

	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
					IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;

		rte_delay_ms(20);
	}

	if (i >= ICE_DCF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

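/*
 * IRQ0 handler: mask the vector, drain pending virtchnl messages and
 * re-arm the interrupt.
 */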
static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}

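/*
 * Synchronously execute one virtchnl command: enqueue it, send it to
 * the PF and busy-wait (in ICE_DCF_ARQ_CHECK_TIME steps) until the
 * interrupt path completes it or the retry budget expires.
 *
 * Illustrative caller sketch (hypothetical, not part of this file;
 * q_stats/pstats are assumed local variables):
 *
 *	struct dcf_virtchnl_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.v_op = VIRTCHNL_OP_GET_STATS;
 *	cmd.req_msg = (uint8_t *)&q_stats;
 *	cmd.req_msglen = sizeof(q_stats);
 *	cmd.rsp_msgbuf = (uint8_t *)&pstats;
 *	cmd.rsp_buflen = sizeof(pstats);
 *	if (ice_dcf_execute_virtchnl_cmd(hw, &cmd))
 *		PMD_DRV_LOG(ERR, "command failed");
 */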
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
		goto ret;
	}

	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}

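/*
 * Tunnel a raw ice AdminQ command through the PF: the descriptor is
 * carried by VIRTCHNL_OP_DCF_CMD_DESC and the optional data buffer by
 * VIRTCHNL_OP_DCF_CMD_BUFF. Both commands must complete before the
 * call returns; the responses are written back into @desc and @buf.
 *
 * Hypothetical usage sketch, for illustration only (assumes the ice
 * base-code helper ice_fill_dflt_direct_cmd_desc() and opcode
 * ice_aqc_opc_get_ver):
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	if (ice_dcf_send_aq_cmd(hw, &desc, NULL, 0))
 *		PMD_DRV_LOG(ERR, "AQ command failed");
 */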
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
		    void *buf, uint16_t buf_size)
{
	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
	struct ice_dcf_hw *hw = dcf_hw;
	int err = 0;
	int i = 0;

	if ((buf && !buf_size) || (!buf && buf_size) ||
	    buf_size > ICE_DCF_AQ_BUF_SZ)
		return -EINVAL;

	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
	desc_cmd.req_msglen = sizeof(*desc);
	desc_cmd.req_msg = (uint8_t *)desc;
	desc_cmd.rsp_buflen = sizeof(*desc);
	desc_cmd.rsp_msgbuf = (uint8_t *)desc;

	if (buf == NULL)
		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
	buff_cmd.req_msglen = buf_size;
	buff_cmd.req_msg = buf;
	buff_cmd.rsp_buflen = buf_size;
	buff_cmd.rsp_msgbuf = buf;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, &desc_cmd);
	ice_dcf_vc_cmd_set(hw, &buff_cmd);

	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
		err = -1;
		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
		goto ret;
	}

	do {
		if (!desc_cmd.pending && !buff_cmd.pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (desc: %d / buff: %d)",
			    i, desc_cmd.v_ret, buff_cmd.v_ret);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

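/*
 * Refresh the VF resources and the VSI map after the PF signals a VSI
 * change, with IRQ0 masked so the no-IRQ request/response helpers can
 * own the AdminQ; retried up to ICE_DCF_ARQ_MAX_RETRIES times.
 */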
int
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
	int i = 0;
	int err = -1;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);

	rte_intr_disable(pci_dev->intr_handle);
	ice_dcf_disable_irq0(hw);

	for (;;) {
		if (ice_dcf_get_vf_resource(hw) == 0 &&
		    ice_dcf_get_vf_vsi_map(hw) >= 0) {
			err = 0;
			break;
		}

		if (++i >= ICE_DCF_ARQ_MAX_RETRIES)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	}

	rte_intr_enable(pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

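/* Query the bitmap of Rx descriptor IDs (RXDIDs) supported by the PF. */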
static int
ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw,
					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  NULL, 0);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  (uint8_t *)&hw->supported_rxdid,
					  sizeof(uint64_t), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	return 0;
}

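/*
 * One-time DCF setup: map the BAR, bring up the AdminQ, negotiate the
 * virtchnl version, fetch the VF resources and the VSI map, allocate
 * the RSS and QoS state and finally register and arm the IRQ0 handler.
 */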
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret, size;

	hw->resetting = false;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
	hw->avf.back = hw;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
		ice_dcf_mode_disable(hw);
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		hw->rss_key = rte_zmalloc(NULL,
					  hw->vf_res->rss_key_size, 0);
		if (!hw->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_alloc;
		}
		hw->rss_lut = rte_zmalloc("rss_lut",
					  hw->vf_res->rss_lut_size, 0);
		if (!hw->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (ice_dcf_get_supported_rxdid(hw) != 0) {
			PMD_INIT_LOG(ERR, "failed to get supported RXDIDs");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
		ice_dcf_tm_conf_init(eth_dev);
		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
		if (!hw->qos_bw_cfg) {
			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
			goto err_rss;
		}
	}

	hw->eth_dev = eth_dev;
	rte_intr_callback_register(pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	return 0;

err_rss:
	rte_free(hw->rss_key);
	rte_free(hw->rss_lut);
err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);

	return -1;
}

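/* Tear down everything set up by ice_dcf_init_hw(), in reverse order. */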
void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if ((hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) &&
	    hw->tm_conf.committed) {
		ice_dcf_clear_bw(hw);
		ice_dcf_tm_conf_uninit(eth_dev);
	}

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	hw->arq_buf = NULL;

	rte_free(hw->vf_vsi_map);
	hw->vf_vsi_map = NULL;

	rte_free(hw->vf_res);
	hw->vf_res = NULL;

	rte_free(hw->rss_lut);
	hw->rss_lut = NULL;

	rte_free(hw->rss_key);
	hw->rss_key = NULL;

	rte_free(hw->qos_bw_cfg);
	hw->qos_bw_cfg = NULL;

	rte_free(hw->ets_config);
	hw->ets_config = NULL;
}

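/* Push the cached RSS hash key to the PF via OP_CONFIG_RSS_KEY. */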
int
ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_key *rss_key;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
	rss_key = rte_zmalloc("rss_key", len, 0);
	if (!rss_key)
		return -ENOMEM;

	rss_key->vsi_id = hw->vsi_res->vsi_id;
	rss_key->key_len = hw->vf_res->rss_key_size;
	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_key;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	rte_free(rss_key);
	return err;
}

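/* Push the cached RSS lookup table to the PF via OP_CONFIG_RSS_LUT. */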
int
ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_lut *rss_lut;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
	rss_lut = rte_zmalloc("rss_lut", len, 0);
	if (!rss_lut)
		return -ENOMEM;

	rss_lut->vsi_id = hw->vsi_res->vsi_id;
	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_lut;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");

	rte_free(rss_lut);
	return err;
}

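/*
 * Initialize RSS from the device configuration: take the user-supplied
 * hash key (or generate a random one), spread all Rx queues across the
 * LUT round-robin and push both tables to the PF.
 */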
int
ice_dcf_init_rss(struct ice_dcf_hw *hw)
{
	struct rte_eth_dev *dev = hw->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all LUT entries to the default queue */
		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
		return ice_dcf_configure_rss_lut(hw);
	}

	/* In IAVF, RSS enablement is set by the PF driver. It cannot be
	 * enabled or disabled based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key)
		/* Generate a random default hash key */
		for (i = 0; i < hw->vf_res->rss_key_size; i++)
			hw->rss_key[i] = (uint8_t)rte_rand();
	else
		rte_memcpy(hw->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   hw->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		hw->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = ice_dcf_configure_rss_lut(hw);
	if (ret)
		return ret;
	ret = ice_dcf_configure_rss_key(hw);
	if (ret)
		return ret;

	return 0;
}

#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
#define IAVF_RXDID_COMMS_OVS_1 22

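/*
 * Program every queue pair of the VSI (ring base address, length and,
 * for Rx, buffer size and the selected RXDID) through
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES.
 */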
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
	struct ice_tx_queue **txq =
		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_config;
	struct virtchnl_queue_pair_info *vc_qp;
	struct dcf_virtchnl_cmd args;
	uint16_t i, size;
	int err;

	size = sizeof(*vc_config) +
	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
	vc_config = rte_zmalloc("cfg_queue", size, 0);
	if (!vc_config)
		return -ENOMEM;

	vc_config->vsi_id = hw->vsi_res->vsi_id;
	vc_config->num_queue_pairs = hw->num_queue_pairs;

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;

		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;

		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_COMMS_OVS_1)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID == %d is not supported",
				    IAVF_RXDID_COMMS_OVS_1);
			rte_free(vc_config);
			return -EINVAL;
		}
#else
		if (hw->vf_res->vf_cap_flags &
			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
			hw->supported_rxdid &
			BIT(IAVF_RXDID_LEGACY_0)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
					"Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
#endif
		ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)vc_config;
	args.req_msglen = size;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of"
			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	rte_free(vc_config);
	return err;
}

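/* Map Rx queues to MSI-X vectors via VIRTCHNL_OP_CONFIG_IRQ_MAP. */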
int
ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct dcf_virtchnl_cmd args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

	map_info->num_vectors = hw->nb_msix;
	for (i = 0; i < hw->nb_msix; i++) {
		vecmap = &map_info->vecmap[i];
		vecmap->vsi_id = hw->vsi_res->vsi_id;
		vecmap->rxitr_idx = 0;
		vecmap->vector_id = hw->msix_base + i;
		vecmap->txq_map = 0;
		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.req_msg = (u8 *)map_info;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}

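/* Enable or disable a single Rx or Tx queue of the VSI. */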
int
ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;
	if (rx)
		queue_select.rx_queues |= 1 << qid;
	else
		queue_select.tx_queues |= 1 << qid;

	memset(&args, 0, sizeof(args));
	if (on)
		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
	else
		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;

	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");

	return err;
}

int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	if (hw->resetting)
		return 0;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;

	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_QUEUES");

	return err;
}

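/* Read the VSI-wide packet/byte counters via VIRTCHNL_OP_GET_STATS. */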
int
ice_dcf_query_stats(struct ice_dcf_hw *hw,
		    struct virtchnl_eth_stats *pstats)
{
	struct virtchnl_queue_select q_stats;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = hw->vsi_res->vsi_id;

	args.v_op = VIRTCHNL_OP_GET_STATS;
	args.req_msg = (uint8_t *)&q_stats;
	args.req_msglen = sizeof(q_stats);
	args.rsp_msglen = sizeof(*pstats);
	args.rsp_msgbuf = (uint8_t *)pstats;
	args.rsp_buflen = sizeof(*pstats);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
		return err;
	}

	return 0;
}

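/*
 * Add or delete one MAC address of the given virtchnl type on the DCF
 * VSI; callers iterate this over the whole address list. Deletions are
 * silently skipped while the VF is resetting.
 */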
int
ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
			     struct rte_ether_addr *addr,
			     bool add, uint8_t type)
{
	struct virtchnl_ether_addr_list *list;
	struct dcf_virtchnl_cmd args;
	int len, err = 0;

	if (hw->resetting) {
		if (!add)
			return 0;

		PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
		return -EIO;
	}

	len = sizeof(struct virtchnl_ether_addr_list);
	len += sizeof(struct virtchnl_ether_addr);

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "fail to allocate memory");
		return -ENOMEM;
	}

	rte_memcpy(list->list[0].addr, addr->addr_bytes,
			sizeof(addr->addr_bytes));

	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
			    RTE_ETHER_ADDR_BYTES(addr));
	list->list[0].type = type;
	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = 1;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");
	rte_free(list);
	return err;
}
1139