/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <sys/types.h>

#include <rte_ethdev.h>

#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

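/*
 * VF representor ports have no data path of their own: the Rx/Tx burst
 * callbacks are stubs that never receive or transmit packets.
 */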
static uint16_t
ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
			 __rte_unused struct rte_mbuf **rx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
			 __rte_unused struct rte_mbuf **tx_pkts,
			 __rte_unused uint16_t nb_pkts)
{
	return 0;
}

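/*
 * Representor ports carry no traffic themselves: dev_configure only
 * re-applies the cached VLAN settings, while start/stop merely toggle the
 * reported link status.
 */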
static int
ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
{
	ice_dcf_vf_repr_init_vlan(dev);

	return 0;
}

static int
ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}

static int
ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

static int
ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
{
	return ice_dcf_vf_repr_uninit(dev);
}

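/*
 * Queue setup, promiscuous/allmulticast mode and link update requests are
 * accepted but ignored, since the representor owns no hardware queues.
 */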
static int
ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_rxconf *conf,
			       __rte_unused struct rte_mempool *pool)
{
	return 0;
}

static int
ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused uint16_t queue_id,
			       __rte_unused uint16_t nb_desc,
			       __rte_unused unsigned int socket_id,
			       __rte_unused const struct rte_eth_txconf *conf)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
			    __rte_unused int wait_to_complete)
{
	return 0;
}

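/*
 * Resolve the DCF hardware handle through the parent DCF ethdev; returns
 * NULL if the DCF port has already been released.
 */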
static __rte_always_inline struct ice_dcf_hw *
ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
{
	struct ice_dcf_adapter *dcf_adapter =
			repr->dcf_eth_dev->data->dev_private;

	if (!dcf_adapter) {
		PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
		return NULL;
	}

	return &dcf_adapter->real_hw;
}

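/*
 * Report the capabilities of the VF backing this representor, taken from
 * the DCF's view of the VF and VSI resources.
 */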
static int
ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct ice_dcf_hw *dcf_hw = ice_dcf_vf_repr_hw(repr);

	if (!dcf_hw)
		return -EIO;

	dev_info->device = dev->device;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
	dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO |
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
	dev_info->switch_info.domain_id = repr->switch_domain_id;
	dev_info->switch_info.port_id = repr->vf_id;

	return 0;
}

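/* True when the VF reports the virtchnl VLAN V2 offload capability. */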
static __rte_always_inline bool
ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
{
	return !!(ice_dcf_vf_repr_hw(repr)->vf_res->vf_cap_flags &
		  VIRTCHNL_VF_OFFLOAD_VLAN_V2);
}

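/*
 * Issue a VIRTCHNL_OP_DCF_VLAN_OFFLOAD request for this VF through the DCF
 * control channel.
 */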
static int
ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
			    struct virtchnl_dcf_vlan_offload *vlan_offload)
{
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
	args.req_msg = (uint8_t *)vlan_offload;
	args.req_msglen = sizeof(*vlan_offload);

	err = ice_dcf_execute_virtchnl_cmd(ice_dcf_vf_repr_hw(repr), &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of VIRTCHNL_OP_DCF_VLAN_OFFLOAD");

	return err;
}

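/*
 * Enable or disable outer VLAN stripping for the VF according to the
 * device Rx offload configuration.
 */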
static int
ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		bool enable = !!(dev_conf->rxmode.offloads &
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP);

		if (enable && repr->outer_vlan_info.port_vlan_ena) {
			PMD_DRV_LOG(ERR,
				    "Disable the port VLAN first\n");
			return -EINVAL;
		}

		memset(&vlan_offload, 0, sizeof(vlan_offload));

		if (enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_INTO_RX_DESC <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;
		else if (repr->outer_vlan_info.stripping_ena && !enable)
			vlan_offload.vlan_flags =
					VIRTCHNL_DCF_VLAN_STRIP_DISABLE <<
					VIRTCHNL_DCF_VLAN_STRIP_MODE_S;

		if (vlan_offload.vlan_flags) {
			vlan_offload.vf_id = repr->vf_id;
			vlan_offload.tpid = repr->outer_vlan_info.tpid;
			vlan_offload.vlan_flags |=
					VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
					VIRTCHNL_DCF_VLAN_TYPE_S;

			err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
			if (err)
				return -EIO;

			repr->outer_vlan_info.stripping_ena = enable;
		}
	}

	return 0;
}

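/*
 * Configure a port-based (outer) VLAN for the VF, or remove it when
 * 'on' is false.
 */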
static int
ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
			      uint16_t pvid, int on)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	struct virtchnl_dcf_vlan_offload vlan_offload;
	int err;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	if (repr->outer_vlan_info.stripping_ena) {
		PMD_DRV_LOG(ERR,
			    "Disable VLAN stripping first\n");
		return -EINVAL;
	}

	if (pvid > RTE_ETHER_MAX_VLAN_ID)
		return -EINVAL;

	memset(&vlan_offload, 0, sizeof(vlan_offload));

	if (on)
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
	else
		vlan_offload.vlan_flags =
				(VIRTCHNL_DCF_VLAN_INSERT_DISABLE <<
				 VIRTCHNL_DCF_VLAN_INSERT_MODE_S);

	vlan_offload.vf_id = repr->vf_id;
	vlan_offload.tpid = repr->outer_vlan_info.tpid;
	vlan_offload.vlan_flags |= (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
				    VIRTCHNL_DCF_VLAN_TYPE_S);
	vlan_offload.vlan_id = pvid;

	err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
	if (!err) {
		if (on) {
			repr->outer_vlan_info.port_vlan_ena = true;
			repr->outer_vlan_info.vid = pvid;
		} else {
			repr->outer_vlan_info.port_vlan_ena = false;
		}
	}

	return err;
}

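/*
 * Set the outer VLAN TPID and re-apply any active port VLAN or stripping
 * configuration so that it uses the new TPID.
 */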
static int
ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type, uint16_t tpid)
{
	struct ice_dcf_vf_repr *repr = dev->data->dev_private;
	int err = 0;

	if (!ice_dcf_vlan_offload_ena(repr))
		return -ENOTSUP;

	if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
		PMD_DRV_LOG(ERR,
			    "Can accelerate only outer VLAN in QinQ\n");
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_QINQ &&
	    tpid != RTE_ETHER_TYPE_VLAN &&
	    tpid != RTE_ETHER_TYPE_QINQ1) {
		PMD_DRV_LOG(ERR,
			    "Invalid TPID: 0x%04x\n", tpid);
		return -EINVAL;
	}

	repr->outer_vlan_info.tpid = tpid;

	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset port VLAN: %d\n",
				    err);
			return err;
		}
	}

	if (repr->outer_vlan_info.stripping_ena) {
		err = ice_dcf_vf_repr_vlan_offload_set(dev,
						       RTE_ETH_VLAN_STRIP_MASK);
		if (err) {
			PMD_DRV_LOG(ERR,
				    "Failed to reset VLAN stripping: %d\n",
				    err);
			return err;
		}
	}

	return 0;
}

static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
	.dev_configure        = ice_dcf_vf_repr_dev_configure,
	.dev_start            = ice_dcf_vf_repr_dev_start,
	.dev_stop             = ice_dcf_vf_repr_dev_stop,
	.dev_close            = ice_dcf_vf_repr_dev_close,
	.dev_infos_get        = ice_dcf_vf_repr_dev_info_get,
	.rx_queue_setup       = ice_dcf_vf_repr_rx_queue_setup,
	.tx_queue_setup       = ice_dcf_vf_repr_tx_queue_setup,
	.promiscuous_enable   = ice_dcf_vf_repr_promiscuous_enable,
	.promiscuous_disable  = ice_dcf_vf_repr_promiscuous_disable,
	.allmulticast_enable  = ice_dcf_vf_repr_allmulticast_enable,
	.allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
	.link_update          = ice_dcf_vf_repr_link_update,
	.vlan_offload_set     = ice_dcf_vf_repr_vlan_offload_set,
	.vlan_pvid_set        = ice_dcf_vf_repr_vlan_pvid_set,
	.vlan_tpid_set        = ice_dcf_vf_repr_vlan_tpid_set,
};

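/*
 * Representor init callback: record the parent DCF port, switch domain and
 * VF id, install the stub ops and assign a random MAC address.
 */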
int
ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	struct ice_dcf_vf_repr_param *param = init_param;

	repr->dcf_eth_dev = param->dcf_eth_dev;
	repr->switch_domain_id = param->switch_domain_id;
	repr->vf_id = param->vf_id;
	repr->outer_vlan_info.port_vlan_ena = false;
	repr->outer_vlan_info.stripping_ena = false;
	repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN;

	vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops;

	vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
	vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;

	vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	vf_rep_eth_dev->data->representor_id = repr->vf_id;
	vf_rep_eth_dev->data->backer_port_id = repr->dcf_eth_dev->data->port_id;

	vf_rep_eth_dev->data->mac_addrs = &repr->mac_addr;

	rte_eth_random_addr(repr->mac_addr.addr_bytes);

	return 0;
}

int
ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev)
{
	vf_rep_eth_dev->data->mac_addrs = NULL;

	return 0;
}

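/*
 * Re-apply the cached VLAN stripping and port VLAN settings to the VF;
 * called from dev_configure.
 */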
int
ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev)
{
	struct ice_dcf_vf_repr *repr = vf_rep_eth_dev->data->dev_private;
	int err;

	err = ice_dcf_vf_repr_vlan_offload_set(vf_rep_eth_dev,
					       RTE_ETH_VLAN_STRIP_MASK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set VLAN offload");
		return err;
	}

	if (repr->outer_vlan_info.port_vlan_ena) {
		err = ice_dcf_vf_repr_vlan_pvid_set(vf_rep_eth_dev,
						    repr->outer_vlan_info.vid,
						    true);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to enable port VLAN");
			return err;
		}
	}

	return 0;
}

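/* Stop every started VF representor attached to this DCF adapter. */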
void
ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter)
{
	uint16_t vf_id;
	int ret;

	if (!dcf_adapter->repr_infos)
		return;

	for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) {
		struct rte_eth_dev *vf_rep_eth_dev =
				dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev;
		if (!vf_rep_eth_dev || vf_rep_eth_dev->data->dev_started == 0)
			continue;

		ret = ice_dcf_vf_repr_dev_stop(vf_rep_eth_dev);
		if (!ret)
			vf_rep_eth_dev->data->dev_started = 0;
	}
}