/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <pthread.h>
#include <unistd.h>

#include <rte_spinlock.h>

#include "ice_dcf_ethdev.h"
#include "ice_generic_flow.h"

#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL	100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;

struct ice_dcf_reset_event_param {
	struct ice_dcf_hw *dcf_hw;

	bool vfr; /* VF reset event */
	uint16_t vf_id; /* The reset VF ID */
};

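/*
 * Refresh the cached VSI context of one VF from the vsi_map reported by the
 * PF: allocate the context on the first valid mapping, redirect existing flow
 * rules when the VSI number changes, and free the context when the VF is
 * disabled.
 */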
static __rte_always_inline void
ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
		       uint16_t vsi_map)
{
	struct ice_vsi_ctx *vsi_ctx;
	bool first_update = false;
	uint16_t new_vsi_num;

	if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
		return;
	}

	vsi_ctx = hw->vsi_ctx[vsi_handle];

	if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
		if (!vsi_ctx) {
			vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
			if (!vsi_ctx) {
				PMD_DRV_LOG(ERR, "No memory for vsi context %u",
					    vsi_handle);
				return;
			}
			hw->vsi_ctx[vsi_handle] = vsi_ctx;
			first_update = true;
		}

		new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
			VIRTCHNL_DCF_VF_VSI_ID_S;

		/* Redirect rules if vsi mapping table changes. */
		if (!first_update) {
			struct ice_flow_redirect rd;

			memset(&rd, 0, sizeof(struct ice_flow_redirect));
			rd.type = ICE_FLOW_REDIRECT_VSI;
			rd.vsi_handle = vsi_handle;
			rd.new_vsi_num = new_vsi_num;
			ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
		} else {
			vsi_ctx->vsi_num = new_vsi_num;
		}

		PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
			    vsi_handle, vsi_ctx->vsi_num);
	} else {
		hw->vsi_ctx[vsi_handle] = NULL;

		ice_free(hw, vsi_ctx);

		PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
	}
}

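/* Refresh the cached VSI contexts of all VFs from the VF-to-VSI map. */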
static void
ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
			  uint16_t *vf_vsi_map)
{
	uint16_t vf_id;

	for (vf_id = 0; vf_id < num_vfs; vf_id++)
		ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
}

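/* Record the PF's VSI number in the VSI context table at the given index. */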
static void
ice_dcf_update_pf_vsi_map(struct ice_hw *hw, uint16_t pf_vsi_idx,
			uint16_t pf_vsi_num)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (unlikely(pf_vsi_idx >= ICE_MAX_VSI)) {
		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", pf_vsi_idx);
		return;
	}

	vsi_ctx = hw->vsi_ctx[pf_vsi_idx];

	if (!vsi_ctx)
		vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));

	if (!vsi_ctx) {
		PMD_DRV_LOG(ERR, "No memory for vsi context %u",
				pf_vsi_idx);
		return;
	}

	vsi_ctx->vsi_num = pf_vsi_num;
	hw->vsi_ctx[pf_vsi_idx] = vsi_ctx;

	PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
			pf_vsi_idx, vsi_ctx->vsi_num);
}

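/*
 * Body of the detached service thread spawned on a VSI map update or VF
 * reset event: re-reads the VF-to-VSI map, re-initializes the VLAN settings
 * of a started VF representor, and replays any committed VF bandwidth
 * configuration.
 */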
static void *
ice_dcf_vsi_update_service_handler(void *param)
{
	struct ice_dcf_reset_event_param *reset_param = param;
	struct ice_dcf_hw *hw = reset_param->dcf_hw;
	struct ice_dcf_adapter *adapter =
		container_of(hw, struct ice_dcf_adapter, real_hw);
	struct ice_adapter *parent_adapter = &adapter->parent;

	pthread_detach(pthread_self());

	rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);

	rte_spinlock_lock(&vsi_update_lock);

	if (!ice_dcf_handle_vsi_update_event(hw)) {
		__atomic_store_n(&parent_adapter->dcf_state_on, true,
				 __ATOMIC_RELAXED);
		ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
					  hw->num_vfs, hw->vf_vsi_map);
	}

	if (reset_param->vfr && adapter->repr_infos) {
		struct rte_eth_dev *vf_rep_eth_dev =
			adapter->repr_infos[reset_param->vf_id].vf_rep_eth_dev;
		if (vf_rep_eth_dev && vf_rep_eth_dev->data->dev_started) {
			PMD_DRV_LOG(DEBUG, "VF%u representor is resetting",
				    reset_param->vf_id);
			ice_dcf_vf_repr_init_vlan(vf_rep_eth_dev);
		}
	}

	if (hw->tm_conf.committed)
		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);

	rte_spinlock_unlock(&vsi_update_lock);

	free(param);

	return NULL;
}

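/* Spawn a control thread to handle a VSI map update or VF reset event. */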
static void
start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
{
#define THREAD_NAME_LEN	16
	struct ice_dcf_reset_event_param *param;
	char name[THREAD_NAME_LEN];
	pthread_t thread;
	int ret;

	param = malloc(sizeof(*param));
	if (!param) {
		PMD_DRV_LOG(ERR, "Failed to allocate the memory for reset handling");
		return;
	}

	param->dcf_hw = dcf_hw;
	param->vfr = vfr;
	param->vf_id = vf_id;

	snprintf(name, sizeof(name), "ice-reset-%u", vf_id);
	ret = rte_ctrl_thread_create(&thread, name, NULL,
				     ice_dcf_vsi_update_service_handler, param);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
		free(param);
	}
}

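/* Translate a virtchnl link speed enum value into a link speed in Mbps. */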
static uint32_t
ice_dcf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
{
	uint32_t speed;

	switch (virt_link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		speed = 100;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		speed = 1000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		speed = 2500;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		speed = 5000;
		break;
	default:
		speed = 0;
		break;
	}

	return speed;
}

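/* Dispatch an asynchronous PF event message received over virtchnl. */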
void
ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
			    uint8_t *msg, uint16_t msglen)
{
	struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
	struct ice_dcf_adapter *adapter =
		container_of(dcf_hw, struct ice_dcf_adapter, real_hw);
	struct ice_adapter *parent_adapter = &adapter->parent;

	if (msglen < sizeof(struct virtchnl_pf_event)) {
		PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
		return;
	}

	switch (pf_msg->event) {
	case VIRTCHNL_EVENT_RESET_IMPENDING:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
		dcf_hw->resetting = true;
		break;
	case VIRTCHNL_EVENT_LINK_CHANGE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
		dcf_hw->link_up = pf_msg->event_data.link_event.link_status;
		if (dcf_hw->vf_res->vf_cap_flags &
			VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
			dcf_hw->link_speed =
				pf_msg->event_data.link_event_adv.link_speed;
		} else {
			enum virtchnl_link_speed speed;
			speed = pf_msg->event_data.link_event.link_speed;
			dcf_hw->link_speed = ice_dcf_convert_link_speed(speed);
		}
		ice_dcf_link_update(dcf_hw->eth_dev, 0);
		rte_eth_dev_callback_process(dcf_hw->eth_dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
		break;
	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
		break;
	case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
			    pf_msg->event_data.vf_vsi_map.vf_id,
			    pf_msg->event_data.vf_vsi_map.vsi_id);
		__atomic_store_n(&parent_adapter->dcf_state_on, false,
				 __ATOMIC_RELAXED);
		start_vsi_reset_thread(dcf_hw, true,
				       pf_msg->event_data.vf_vsi_map.vf_id);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
		break;
	}
}

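/* Query the port ETS configuration from firmware and cache it in the DCF HW struct. */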
static int
ice_dcf_query_port_ets(struct ice_hw *parent_hw, struct ice_dcf_hw *real_hw)
{
	int ret;

	real_hw->ets_config = (struct ice_aqc_port_ets_elem *)
			ice_malloc(real_hw, sizeof(*real_hw->ets_config));
	if (!real_hw->ets_config)
		return ICE_ERR_NO_MEMORY;

	ret = ice_aq_query_port_ets(parent_hw->port_info,
			real_hw->ets_config, sizeof(*real_hw->ets_config),
			NULL);
	if (ret) {
		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
		rte_free(real_hw->ets_config);
		real_hw->ets_config = NULL;
		return ret;
	}

	return ICE_SUCCESS;
}

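/*
 * Initialize the parent ice_hw instance: query the firmware version and
 * device capabilities, set up the port info (switch configuration, PHY
 * capabilities, link status), and initialize the filter management
 * structures and HW tables.
 */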
static int
ice_dcf_init_parent_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		return status;

	status = ice_get_caps(hw);
	if (status)
		return status;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info)
		return ICE_ERR_NO_MEMORY;

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_alloc;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL);
	if (status)
		goto err_unroll_alloc;

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_alloc;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	PMD_INIT_LOG(INFO,
		     "firmware %d.%d.%d api %d.%d.%d build 0x%08x",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
		     hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
		     hw->fw_build);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
	hw->switch_info = NULL;

	return status;
}

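/*
 * Release the parent HW resources: filter management structures, the loaded
 * package segment, HW tables, port info and all cached VSI contexts.
 */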
static void ice_dcf_uninit_parent_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
	hw->switch_info = NULL;

	ice_clear_all_vsi_ctx(hw);
}

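/*
 * Load the DDP package for the parent adapter. The package info (DSN) is
 * requested from the PF via VIRTCHNL_OP_DCF_GET_PKG_INFO; if that request
 * fails, the package is loaded without a DSN.
 */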
static int
ice_dcf_load_pkg(struct ice_adapter *adapter)
{
	struct ice_dcf_adapter *dcf_adapter =
			container_of(&adapter->hw, struct ice_dcf_adapter, parent.hw);
	struct virtchnl_pkg_info pkg_info;
	struct dcf_virtchnl_cmd vc_cmd;
	bool use_dsn;
	uint64_t dsn = 0;

	vc_cmd.v_op = VIRTCHNL_OP_DCF_GET_PKG_INFO;
	vc_cmd.req_msglen = 0;
	vc_cmd.req_msg = NULL;
	vc_cmd.rsp_buflen = sizeof(pkg_info);
	vc_cmd.rsp_msgbuf = (uint8_t *)&pkg_info;

	use_dsn = ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd) == 0;
	if (use_dsn)
		rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn));

	return ice_load_pkg(adapter, use_dsn, dsn);
}

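/*
 * Initialize the parent adapter for the DCF: create a dummy main VSI, hook
 * the admin queue up to the virtchnl-based send function, initialize the
 * parent HW, query port ETS when QoS is supported, load the DDP package,
 * build the PF and VF VSI maps, initialize the flow engine and set up the
 * device MAC address.
 */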
int
ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	const struct rte_ether_addr *mac;
	int err;

	parent_adapter->pf.adapter = parent_adapter;
	parent_adapter->pf.dev_data = eth_dev->data;
	/* create a dummy main_vsi */
	parent_adapter->pf.main_vsi =
		rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!parent_adapter->pf.main_vsi)
		return -ENOMEM;
	parent_adapter->pf.main_vsi->adapter = parent_adapter;
	parent_adapter->pf.adapter_stopped = 1;

	parent_hw->back = parent_adapter;
	parent_hw->mac_type = ICE_MAC_GENERIC;
	parent_hw->vendor_id = ICE_INTEL_VENDOR_ID;

	ice_init_lock(&parent_hw->adminq.sq_lock);
	ice_init_lock(&parent_hw->adminq.rq_lock);
	parent_hw->aq_send_cmd_fn = ice_dcf_send_aq_cmd;
	parent_hw->aq_send_cmd_param = &adapter->real_hw;
	parent_hw->dcf_enabled = true;

	err = ice_dcf_init_parent_hw(parent_hw);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to init the DCF parent hardware with error %d",
			     err);
		return err;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
		err = ice_dcf_query_port_ets(parent_hw, hw);
		if (err) {
			PMD_INIT_LOG(ERR, "failed to query port ets with error %d",
				     err);
			goto uninit_hw;
		}
	}

	err = ice_dcf_load_pkg(parent_adapter);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to load package with error %d",
			     err);
		goto uninit_hw;
	}

	parent_adapter->pf.main_vsi->idx = hw->num_vfs;
	ice_dcf_update_pf_vsi_map(parent_hw,
			parent_adapter->pf.main_vsi->idx, hw->pf_vsi_id);

	ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);

	err = ice_flow_init(parent_adapter);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to initialize flow");
		goto uninit_hw;
	}

	mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
	if (rte_is_valid_assigned_ether_addr(mac))
		rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
	else
		rte_eth_random_addr(parent_adapter->pf.dev_addr.addr_bytes);

	eth_dev->data->mac_addrs = &parent_adapter->pf.dev_addr;

	return 0;

uninit_hw:
	ice_dcf_uninit_parent_hw(parent_hw);
	return err;
}

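/* Tear down the resources set up by ice_dcf_init_parent_adapter(). */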
void
ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
	struct ice_adapter *parent_adapter = &adapter->parent;
	struct ice_hw *parent_hw = &parent_adapter->hw;

	eth_dev->data->mac_addrs = NULL;
	rte_free(parent_adapter->pf.main_vsi);
	parent_adapter->pf.main_vsi = NULL;

	ice_flow_uninit(parent_adapter);
	ice_dcf_uninit_parent_hw(parent_hw);
}