/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

int lio_logtype_init;
int lio_logtype_driver;

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};

/* Wait for a control command to reach the NIC; returns nonzero on timeout. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* Statistics names and their offsets in the stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
						sizeof(struct octeon_rx_stats)},
};

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)

/* Get hw stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
					rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}

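/* Fill in the names of the extended statistics exposed by this port. */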
static int
lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned limit __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	unsigned int i;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (xstats_names == NULL)
		return LIO_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_get_names() */

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", rte_lio_stats_strings[i].name);
	}

	return LIO_NB_XSTATS;
}

/* Reset hw stats for the port */
static void
lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Clear stats command timed out\n");
		return;
	}

	/* clear stored per queue stats */
	RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
	(*eth_dev->dev_ops->stats_reset)(eth_dev);
}

/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
static int
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_stats *stats)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;
	uint64_t bytes = 0;
	uint64_t pkts = 0;
	uint64_t drop = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			pkts += iq_stats->tx_done;
			drop += iq_stats->tx_dropped;
			bytes += iq_stats->tx_tot_bytes;
		}
	}

	stats->opackets = pkts;
	stats->obytes = bytes;
	stats->oerrors = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			pkts += oq_stats->rx_pkts_received;
			drop += (oq_stats->rx_dropped +
					oq_stats->dropped_toomany +
					oq_stats->dropped_nomem);
			bytes += oq_stats->rx_bytes_received;
		}
	}
	stats->ibytes = bytes;
	stats->ipackets = pkts;
	stats->ierrors = drop;

	return 0;
}

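/* Clear the per-queue software statistics kept by the PMD. */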
static void
lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
		}
	}
}

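/* Report device capabilities: link speeds, queue limits,
 * supported offloads and RSS features.
 */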
static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	switch (pci_dev->id.subsystem_device_id) {
	/* CN23xx 10G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_210:
	case PCI_SUBSYS_DEV_ID_CN2360_210:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		break;
	/* CN23xx 25G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_225:
	case PCI_SUBSYS_DEV_ID_CN2360_225:
		devinfo->speed_capa = ETH_LINK_SPEED_25G;
		break;
	default:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		lio_dev_err(lio_dev,
			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
	}

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
				    DEV_RX_OFFLOAD_UDP_CKSUM		|
				    DEV_RX_OFFLOAD_TCP_CKSUM		|
				    DEV_RX_OFFLOAD_VLAN_STRIP);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
				    DEV_TX_OFFLOAD_UDP_CKSUM		|
				    DEV_TX_OFFLOAD_TCP_CKSUM		|
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
					   ETH_RSS_NONFRAG_IPV4_TCP	|
					   ETH_RSS_IPV6			|
					   ETH_RSS_NONFRAG_IPV6_TCP	|
					   ETH_RSS_IPV6_EX		|
					   ETH_RSS_IPV6_TCP_EX);
}

static int
lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
	uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* Check if VF MTU is within the allowed range.
	 * The new value must not exceed the PF MTU.
	 */
	if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
			    ETHER_MIN_MTU, pf_mtu);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
	ctrl_pkt.ncmd.s.param1 = mtu;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
		return -1;
	}

	if (frame_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
	eth_dev->data->mtu = mtu;

	return 0;
}

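/* Program the RSS redirection table (RETA) in firmware
 * and cache it in the local RSS state.
 */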
static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of the hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

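/* Return the locally cached RSS redirection table. */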
static int
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	int i, num;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of the hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < num; i++) {
		memcpy(reta_conf->reta,
		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
		       RTE_RETA_GROUP_SIZE);
		reta_conf++;
	}

	return 0;
}

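/* Report the current RSS hash key and the set of enabled hash protocols. */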
static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;
	uint64_t rss_hf = 0;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in NIC\n");
		rss_conf->rss_hf = 0;
		return 0;
	}

	/* Get key value */
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

	if (rss_state->ip)
		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (rss_state->ipv6)
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;

	return 0;
}

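/* Update the RSS hash key and/or the set of hashed protocols in firmware. */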
static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* RSS can't be disabled through hash flags
		 * if it was enabled by default during init.
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable RSS if it was disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip = 1;
		} else {
			rss_state->ip = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6 = 1;
		} else {
			rss_state->ipv6 = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Add a VXLAN destination UDP port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  UDP tunnel configuration
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Remove a VXLAN destination UDP port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  UDP tunnel configuration
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
		return -1;
	}

	return 0;
}

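/* Add or remove a VLAN filter entry via a firmware control command. */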
static int
lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (lio_dev->linfo.vlan_is_admin_assigned)
		return -EPERM;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = on ?
			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
	ctrl_pkt.ncmd.s.param1 = vlan_id;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
			    on ? "add" : "remove");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
			    on ? "add" : "remove");
		return -1;
	}

	return 0;
}

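/* Population count: number of set bits in a 64-bit word.
 * Used to count the queues enabled in firmware queue masks.
 */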
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}

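/* Translate firmware link info into rte_eth_link and publish it. */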
static int
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link;

	/* Initialize */
	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		return rte_eth_linkstatus_set(eth_dev, &link);
	}

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case LIO_LINK_SPEED_25000:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	}

	return rte_eth_linkstatus_set(eth_dev, &link);
}

/**
 * \brief Apply the current device flags (promiscuous, allmulticast) in firmware
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
static void
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
}

static void
lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

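/* Set up default RSS: hash key, hashed protocols and an even
 * spread of the redirection table across the configured Rx queues.
 */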
static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;
	uint16_t i;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
		return;
	}

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
}

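/* Configure multi-queue Rx: enable RSS for ETH_MQ_RX_RSS mode,
 * otherwise disable hashing.
 */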
static void
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		lio_dev_rss_configure(eth_dev);
		break;
	case ETH_MQ_RX_NONE:
	/* if mq_mode is none, disable rss mode. */
	default:
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
	}
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	/* Free previous allocation if any */
	if (eth_dev->data->rx_queues[q_no] != NULL) {
		lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
		eth_dev->data->rx_queues[q_no] = NULL;
	}

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	int oq_no;

	if (droq) {
		oq_no = droq->q_no;
		lio_delete_droq_queue(droq->lio_dev, oq_no);
	}
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	/* Free previous allocation if any */
	if (eth_dev->data->tx_queues[q_no] != NULL) {
		lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
		eth_dev->data->tx_queues[q_no] = NULL;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);

	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
				socket_id);

	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	uint32_t fw_mapped_iq_no;

	if (tq) {
		/* Free sg_list */
		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
	}
}

/**
 * API to query link state from firmware.
 */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		if (ls->s.mtu < eth_dev->data->mtu) {
			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
				     ls->s.mtu);
			eth_dev->data->mtu = ls->s.mtu;
		}
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}

/* This function will be invoked every LSC_TIMEOUT ns (100ms)
 * and will update the link state if it changes.
 */
static void
lio_sync_link_state_check(void *eth_dev)
{
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule the periodic link status check.
	 * Checking stops when the interface is closed
	 * and starts again when it is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
				  eth_dev);
}

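/* Start the device: enable IO queues, start Rx, configure RSS,
 * begin link-status polling and set the MTU.
 */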
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu;
	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* Before updating the link info,
	 * linfo.link.link_status64 must be cleared.
	 */
	lio_dev->linfo.link.link_status64 = 0;

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		ret = -1;
		goto dev_mtu_set_error;
	}

	mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
	if (mtu < ETHER_MIN_MTU)
		mtu = ETHER_MIN_MTU;

	if (eth_dev->data->mtu != mtu) {
		ret = lio_dev_mtu_set(eth_dev, mtu);
		if (ret)
			goto dev_mtu_set_error;
	}

	return 0;

dev_mtu_set_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}

/* Stop device and disable input/output functions */
static void
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	lio_dev->intf_open = 0;
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	lio_send_rx_ctrl_cmd(eth_dev, 0);

	lio_wait_for_instr_fetch(lio_dev);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;
}

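/* Administratively bring the link up by re-enabling Rx in firmware. */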
static int
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");
		return 0;
	}

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");
		return -1;
	}

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

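/* Administratively bring the link down by disabling Rx in firmware. */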
static int
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");
		return 0;
	}

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
		return -1;
	}

	return 0;
}

/**
 * Reset and stop the device. This occurs on the first
 * call to this routine. Subsequent calls will simply
 * return. NB: This will require the NIC to be rebooted.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @return
 *    - nothing
 */
static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		rte_delay_ms(LIO_PCI_FLR_WAIT);
	}

	/* lio_free_mbox */
	lio_dev->fn_list.free_mbox(lio_dev);

	/* Free glist resources */
	rte_free(lio_dev->glist_head);
	rte_free(lio_dev->glist_lock);
	lio_dev->glist_head = NULL;
	lio_dev->glist_lock = NULL;

	lio_dev->port_configured = 0;

	/* Delete all queues */
	lio_dev_clear_queues(eth_dev);
}

/**
 * Enable tunnel rx checksum verification from firmware.
 */
static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
}

/**
 * Enable checksum calculation for the inner packet in a tunnel.
 */
static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}

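/* Request a new Tx/Rx queue count from firmware; requires firmware
 * with queue reconfiguration support.
 */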
static int
lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
			    int num_rxq)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_Q_RECONF_MIN_VERSION);
		return -ENOTSUP;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
	ctrl_pkt.ncmd.s.param1 = num_txq;
	ctrl_pkt.ncmd.s.param2 = num_rxq;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send queue count control command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Queue count control command timed out\n");
		return -1;
	}

	return 0;
}

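/* Reconfigure queue counts: notify firmware if they changed,
 * stop the port and reset the IOQ registers.
 */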
static int
lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (lio_dev->nb_rx_queues != num_rxq ||
	    lio_dev->nb_tx_queues != num_txq) {
		if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
			return -1;
		lio_dev->nb_rx_queues = num_rxq;
		lio_dev->nb_tx_queues = num_txq;
	}

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	/* Reset ioq registers */
	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		return -1;
	}

	return 0;
}

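/* Configure the interface: negotiate queue counts with firmware via
 * IF_CFG, read back queue mappings, MAC address and link info, and
 * allocate gather-list resources.
 */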
static int
lio_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;
	uint32_t resp_size;

	PMD_INIT_FUNC_TRACE();

	/* Inform firmware about change in number of queues to use.
	 * Disable IO queues and reset registers for re-configuration.
	 */
	if (lio_dev->port_configured)
		return lio_reconf_queues(eth_dev,
					 eth_dev->data->nb_tx_queues,
					 eth_dev->data->nb_rx_queues);

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	/* Set max number of queues which can be re-configured. */
	lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have the capability to reconfigure the queues.
	 * Claim all queues, and use as many as required.
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
			    retval);
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
	}

	snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s",
		 resp->cfg_info.lio_firmware_version);

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;
	}

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);
	}

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
		    resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);
	}

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
			resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}
1878 
/* Ethernet device operations supported by this driver */
1880 static const struct eth_dev_ops liovf_eth_dev_ops = {
1881 	.dev_configure		= lio_dev_configure,
1882 	.dev_start		= lio_dev_start,
1883 	.dev_stop		= lio_dev_stop,
1884 	.dev_set_link_up	= lio_dev_set_link_up,
1885 	.dev_set_link_down	= lio_dev_set_link_down,
1886 	.dev_close		= lio_dev_close,
1887 	.promiscuous_enable	= lio_dev_promiscuous_enable,
1888 	.promiscuous_disable	= lio_dev_promiscuous_disable,
1889 	.allmulticast_enable	= lio_dev_allmulticast_enable,
1890 	.allmulticast_disable	= lio_dev_allmulticast_disable,
1891 	.link_update		= lio_dev_link_update,
1892 	.stats_get		= lio_dev_stats_get,
1893 	.xstats_get		= lio_dev_xstats_get,
1894 	.xstats_get_names	= lio_dev_xstats_get_names,
1895 	.stats_reset		= lio_dev_stats_reset,
1896 	.xstats_reset		= lio_dev_xstats_reset,
1897 	.dev_infos_get		= lio_dev_info_get,
1898 	.vlan_filter_set	= lio_dev_vlan_filter_set,
1899 	.rx_queue_setup		= lio_dev_rx_queue_setup,
1900 	.rx_queue_release	= lio_dev_rx_queue_release,
1901 	.tx_queue_setup		= lio_dev_tx_queue_setup,
1902 	.tx_queue_release	= lio_dev_tx_queue_release,
1903 	.reta_update		= lio_dev_rss_reta_update,
1904 	.reta_query		= lio_dev_rss_reta_query,
1905 	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
1906 	.rss_hash_update	= lio_dev_rss_hash_update,
1907 	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
1908 	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
1909 	.mtu_set		= lio_dev_mtu_set,
1910 };
1911 
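/**
 * \brief Alarm callback polling for the PF handshake response
 * @param lio_dev lio device, passed as void * by the alarm framework
 *
 * Drains the mailbox and re-arms itself every microsecond via
 * rte_eal_alarm_set() until the PF handshake word has been filled in.
 */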
1912 static void
1913 lio_check_pf_hs_response(void *lio_dev)
1914 {
1915 	struct lio_device *dev = lio_dev;
1916 
	/* stop polling once the response has arrived */
1918 	if (dev->pfvf_hsword.coproc_tics_per_us)
1919 		return;
1920 
1921 	cn23xx_vf_handle_mbox(dev);
1922 
1923 	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
1924 }
1925 
/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
1930 static int
1931 lio_chip_specific_setup(struct lio_device *lio_dev)
1932 {
1933 	struct rte_pci_device *pdev = lio_dev->pci_dev;
1934 	uint32_t dev_id = pdev->id.device_id;
1935 	const char *s;
1936 	int ret = 1;
1937 
1938 	switch (dev_id) {
1939 	case LIO_CN23XX_VF_VID:
1940 		lio_dev->chip_id = LIO_CN23XX_VF_VID;
1941 		ret = cn23xx_vf_setup_device(lio_dev);
1942 		s = "CN23XX VF";
1943 		break;
1944 	default:
1945 		s = "?";
1946 		lio_dev_err(lio_dev, "Unsupported Chip\n");
1947 	}
1948 
1949 	if (!ret)
1950 		lio_dev_info(lio_dev, "DEVICE : %s\n", s);
1951 
1952 	return ret;
1953 }
1954 
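/**
 * \brief One-time device initialization done at probe
 * @param lio_dev lio device
 * @param pdev PCI device backing this VF
 *
 * Brings the VF up: chip identification, soft command buffer pool,
 * response lists, PF/VF mailbox handshake, optional FLR, device
 * registers, instruction queue 0 and I/O queue enablement. Undoes
 * the allocations on failure.
 */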
1955 static int
1956 lio_first_time_init(struct lio_device *lio_dev,
1957 		    struct rte_pci_device *pdev)
1958 {
1959 	int dpdk_queues;
1960 
1961 	PMD_INIT_FUNC_TRACE();
1962 
	/* set DPDK-specific PCI device pointer */
1964 	lio_dev->pci_dev = pdev;
1965 
1966 	/* Identify the LIO type and set device ops */
1967 	if (lio_chip_specific_setup(lio_dev)) {
1968 		lio_dev_err(lio_dev, "Chip specific setup failed\n");
1969 		return -1;
1970 	}
1971 
1972 	/* Initialize soft command buffer pool */
1973 	if (lio_setup_sc_buffer_pool(lio_dev)) {
1974 		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
1975 		return -1;
1976 	}
1977 
1978 	/* Initialize lists to manage the requests of different types that
1979 	 * arrive from applications for this lio device.
1980 	 */
1981 	lio_setup_response_list(lio_dev);
1982 
1983 	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
1984 		lio_dev_err(lio_dev, "Mailbox setup failed\n");
1985 		goto error;
1986 	}
1987 
1988 	/* Check PF response */
1989 	lio_check_pf_hs_response((void *)lio_dev);
1990 
1991 	/* Do handshake and exit if incompatible PF driver */
1992 	if (cn23xx_pfvf_handshake(lio_dev))
1993 		goto error;
1994 
1995 	/* Request and wait for device reset. */
1996 	if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
1997 		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
1998 		/* FLR wait time doubled as a precaution. */
1999 		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
2000 	}
2001 
2002 	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
2003 		lio_dev_err(lio_dev, "Failed to configure device registers\n");
2004 		goto error;
2005 	}
2006 
2007 	if (lio_setup_instr_queue0(lio_dev)) {
2008 		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
2009 		goto error;
2010 	}
2011 
2012 	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
2013 
2014 	lio_dev->max_tx_queues = dpdk_queues;
2015 	lio_dev->max_rx_queues = dpdk_queues;
2016 
2017 	/* Enable input and output queues for this device */
2018 	if (lio_dev->fn_list.enable_io_queues(lio_dev))
2019 		goto error;
2020 
2021 	return 0;
2022 
2023 error:
2024 	lio_free_sc_buffer_pool(lio_dev);
2025 	if (lio_dev->mbox[0])
2026 		lio_dev->fn_list.free_mbox(lio_dev);
2027 	if (lio_dev->instr_queue[0])
2028 		lio_free_instr_queue0(lio_dev);
2029 
2030 	return -1;
2031 }
2032 
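/**
 * \brief Tear down an ethdev port
 * @param eth_dev Pointer to the structure rte_eth_dev
 *
 * Only the primary process releases resources: the soft command
 * buffer pool is freed and the ops/burst pointers are cleared.
 */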
2033 static int
2034 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2035 {
2036 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
2037 
2038 	PMD_INIT_FUNC_TRACE();
2039 
2040 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2041 		return 0;
2042 
	/* free the soft command buffer pool */
2044 	lio_free_sc_buffer_pool(lio_dev);
2045 
2046 	eth_dev->dev_ops = NULL;
2047 	eth_dev->rx_pkt_burst = NULL;
2048 	eth_dev->tx_pkt_burst = NULL;
2049 
2050 	return 0;
2051 }
2052 
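/**
 * \brief Initialize an ethdev port for a LiquidIO VF
 * @param eth_dev Pointer to the structure rte_eth_dev
 *
 * Secondary processes only attach the RX/TX burst handlers. The
 * primary process additionally maps BAR0, runs first-time device
 * initialization and allocates the single MAC address entry.
 */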
2053 static int
2054 lio_eth_dev_init(struct rte_eth_dev *eth_dev)
2055 {
2056 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
2057 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
2058 
2059 	PMD_INIT_FUNC_TRACE();
2060 
2061 	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
2062 	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
2063 
2064 	/* Primary does the initialization. */
2065 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2066 		return 0;
2067 
2068 	rte_eth_copy_pci_info(eth_dev, pdev);
2069 
	if (pdev->mem_resource[0].addr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0\n");
		return -ENODEV;
	}
	lio_dev->hw_addr = pdev->mem_resource[0].addr;
2076 
2077 	lio_dev->eth_dev = eth_dev;
2078 	/* set lio device print string */
2079 	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
2080 		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
2081 		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2082 
2083 	lio_dev->port_id = eth_dev->data->port_id;
2084 
2085 	if (lio_first_time_init(lio_dev, pdev)) {
2086 		lio_dev_err(lio_dev, "Device init failed\n");
2087 		return -EINVAL;
2088 	}
2089 
2090 	eth_dev->dev_ops = &liovf_eth_dev_ops;
2091 	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
2092 	if (eth_dev->data->mac_addrs == NULL) {
2093 		lio_dev_err(lio_dev,
2094 			    "MAC addresses memory allocation failed\n");
2095 		eth_dev->dev_ops = NULL;
2096 		eth_dev->rx_pkt_burst = NULL;
2097 		eth_dev->tx_pkt_burst = NULL;
2098 		return -ENOMEM;
2099 	}
2100 
2101 	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
2102 	rte_wmb();
2103 
2104 	lio_dev->port_configured = 0;
2105 	/* Always allow unicast packets */
2106 	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
2107 
2108 	return 0;
2109 }
2110 
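/* Allocate an ethdev with lio_device private data for the matched
 * PCI device and initialize it.
 */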
2111 static int
2112 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2113 		      struct rte_pci_device *pci_dev)
2114 {
2115 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
2116 			lio_eth_dev_init);
2117 }
2118 
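/* Uninitialize and release the ethdev bound to this PCI device. */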
2119 static int
2120 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2121 {
2122 	return rte_eth_dev_pci_generic_remove(pci_dev,
2123 					      lio_eth_dev_uninit);
2124 }
2125 
2126 /* Set of PCI devices this driver supports */
2127 static const struct rte_pci_id pci_id_liovf_map[] = {
2128 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
2129 	{ .vendor_id = 0, /* sentinel */ }
2130 };
2131 
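/* PCI driver glue; RTE_PCI_DRV_NEED_MAPPING makes the EAL map the
 * device BARs before probe is invoked.
 */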
2132 static struct rte_pci_driver rte_liovf_pmd = {
2133 	.id_table	= pci_id_liovf_map,
2134 	.drv_flags      = RTE_PCI_DRV_NEED_MAPPING,
2135 	.probe		= lio_eth_dev_pci_probe,
2136 	.remove		= lio_eth_dev_pci_remove,
2137 };
2138 
2139 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
2140 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
2141 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
2142 
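/* Constructor-time log type registration; both log types default to
 * NOTICE and can be raised with the EAL --log-level option.
 */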
2143 RTE_INIT(lio_init_log)
2144 {
2145 	lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
2146 	if (lio_logtype_init >= 0)
2147 		rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
2148 	lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
2149 	if (lio_logtype_driver >= 0)
2150 		rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
2151 }
2152