1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 /*
9  * vim:shiftwidth=8:noexpandtab
10  *
11  * @file dpdk/pmd/nfp_common.c
12  *
13  * Netronome vNIC DPDK Poll-Mode Driver: Common files
14  */
15 
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_dev.h>
23 #include <rte_ether.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_mempool.h>
27 #include <rte_version.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_spinlock.h>
31 #include <rte_service_component.h>
32 
33 #include "nfpcore/nfp_cpp.h"
34 #include "nfpcore/nfp_nffw.h"
35 #include "nfpcore/nfp_hwinfo.h"
36 #include "nfpcore/nfp_mip.h"
37 #include "nfpcore/nfp_rtsym.h"
38 #include "nfpcore/nfp_nsp.h"
39 
40 #include "nfp_common.h"
41 #include "nfp_rxtx.h"
42 #include "nfp_logs.h"
43 #include "nfp_ctrl.h"
44 #include "nfp_cpp_bridge.h"
45 
46 #include <sys/types.h>
47 #include <sys/socket.h>
48 #include <sys/un.h>
49 #include <unistd.h>
50 #include <stdio.h>
51 #include <sys/ioctl.h>
52 #include <errno.h>
53 
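/*
 * Kick the configuration queue and wait for the firmware acknowledgement.
 * The write pointer of the QCP config queue is advanced by one, then
 * NFP_NET_CFG_UPDATE is polled (in 1 ms steps) until the firmware clears
 * it, reports an error bit, or the poll times out.
 */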
54 static int
55 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
56 {
57 	int cnt;
58 	uint32_t new;
59 	struct timespec wait;
60 
61 	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
62 		    hw->qcp_cfg);
63 
64 	if (hw->qcp_cfg == NULL)
65 		rte_panic("Bad configuration queue pointer\n");
66 
67 	nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
68 
69 	wait.tv_sec = 0;
70 	wait.tv_nsec = 1000000;
71 
72 	PMD_DRV_LOG(DEBUG, "Polling for update ack...");
73 
74 	/* Poll update field, waiting for NFP to ack the config */
75 	for (cnt = 0; ; cnt++) {
76 		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
77 		if (new == 0)
78 			break;
79 		if (new & NFP_NET_CFG_UPDATE_ERR) {
80 			PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
81 			return -1;
82 		}
83 		if (cnt >= NFP_NET_POLL_TIMEOUT) {
84 			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
85 					  " %dms", update, cnt);
86 			rte_panic("Exiting\n");
87 		}
88 		nanosleep(&wait, 0); /* wait for 1 ms */
89 	}
90 	PMD_DRV_LOG(DEBUG, "Ack DONE");
91 	return 0;
92 }
93 
94 /*
95  * Reconfigure the NIC
96  * @hw:      device to reconfigure
97  * @ctrl:    The value for the ctrl field in the BAR config
98  * @update:  The value for the update field in the BAR config
99  *
100  * Write the update word to the BAR and ping the reconfig queue. Then poll
101  * until the firmware has acknowledged the update by zeroing the update word.
102  */
103 int
104 nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
105 {
106 	uint32_t err;
107 
108 	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
109 		    ctrl, update);
110 
111 	rte_spinlock_lock(&hw->reconfig_lock);
112 
113 	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
114 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
115 
116 	rte_wmb();
117 
118 	err = __nfp_net_reconfig(hw, update);
119 
120 	rte_spinlock_unlock(&hw->reconfig_lock);
121 
122 	if (!err)
123 		return 0;
124 
125 	/*
126 	 * A reconfig error reported here is one the caller can handle;
127 	 * unrecoverable errors trigger rte_panic() inside __nfp_net_reconfig().
128 	 */
129 	PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
130 		     ctrl, update);
131 	return -EIO;
132 }
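
/*
 * Typical (illustrative) usage within this driver: build a new control
 * word, request the matching update bits and latch the value only when
 * the reconfig succeeds, e.g.
 *
 *	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
 *	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) == 0)
 *		hw->ctrl = new_ctrl;
 */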
133 
134 /*
135  * Configure an Ethernet device. This function must be invoked first
136  * before any other function in the Ethernet API. This function can
137  * also be re-invoked when a device is in the stopped state.
138  */
139 int
140 nfp_net_configure(struct rte_eth_dev *dev)
141 {
142 	struct rte_eth_conf *dev_conf;
143 	struct rte_eth_rxmode *rxmode;
144 	struct rte_eth_txmode *txmode;
145 	struct nfp_net_hw *hw;
146 
147 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
148 
149 	/*
150 	 * The DPDK application specifies how many queues to use and how
151 	 * those queues should be configured. The ethdev layer uses that
152 	 * information and makes sure no more queues than those advertised
153 	 * by the driver are requested. This function is called after that
154 	 * internal validation has been done.
155 	 */
156 
157 	PMD_INIT_LOG(DEBUG, "Configure");
158 
159 	dev_conf = &dev->data->dev_conf;
160 	rxmode = &dev_conf->rxmode;
161 	txmode = &dev_conf->txmode;
162 
163 	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
164 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
165 
166 	/* Checking TX mode */
167 	if (txmode->mq_mode) {
168 		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
169 		return -EINVAL;
170 	}
171 
172 	/* Checking RX mode */
173 	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
174 	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
175 		PMD_INIT_LOG(INFO, "RSS not supported");
176 		return -EINVAL;
177 	}
178 
179 	return 0;
180 }
181 
182 void
183 nfp_net_enable_queues(struct rte_eth_dev *dev)
184 {
185 	struct nfp_net_hw *hw;
186 	uint64_t enabled_queues = 0;
187 	int i;
188 
189 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190 
191 	/* Enabling the required TX queues in the device */
192 	for (i = 0; i < dev->data->nb_tx_queues; i++)
193 		enabled_queues |= (1ULL << i);
194 
195 	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
196 
197 	enabled_queues = 0;
198 
199 	/* Enabling the required RX queues in the device */
200 	for (i = 0; i < dev->data->nb_rx_queues; i++)
201 		enabled_queues |= (1ULL << i);
202 
203 	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
204 }
205 
206 void
207 nfp_net_disable_queues(struct rte_eth_dev *dev)
208 {
209 	struct nfp_net_hw *hw;
210 	uint32_t new_ctrl, update = 0;
211 
212 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
213 
214 	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
215 	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
216 
217 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
218 	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
219 		 NFP_NET_CFG_UPDATE_MSIX;
220 
221 	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
222 		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
223 
224 	/* If the reconfig fails, avoid changing the cached hw state */
225 	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
226 		return;
227 
228 	hw->ctrl = new_ctrl;
229 }
230 
231 void
232 nfp_net_params_setup(struct nfp_net_hw *hw)
233 {
234 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
235 	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
236 }
237 
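/*
 * hw->qcp_cfg points NFP_QCP_QUEUE_ADDR_SZ bytes into the TX BAR; this is
 * the reconfig queue that __nfp_net_reconfig() kicks to notify the firmware.
 */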
238 void
239 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
240 {
241 	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
242 }
243 
244 #define ETH_ADDR_LEN	6
245 
246 void
247 nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
248 {
249 	int i;
250 
251 	for (i = 0; i < ETH_ADDR_LEN; i++)
252 		dst[i] = src[i];
253 }
254 
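/*
 * Write the MAC address to the vNIC config BAR: the first four bytes go
 * into NFP_NET_CFG_MACADDR as a big-endian word, the last two bytes into
 * a 16-bit word at offset 6 of the same area.
 */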
255 void
256 nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
257 {
258 	uint32_t mac0 = *(uint32_t *)mac;
259 	uint16_t mac1;
260 
261 	nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
262 
263 	mac += 4;
264 	mac1 = *(uint16_t *)mac;
265 	nn_writew(rte_cpu_to_be_16(mac1),
266 		  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
267 }
268 
269 int
270 nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
271 {
272 	struct nfp_net_hw *hw;
273 	uint32_t update, ctrl;
274 
275 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
276 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
277 	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
278 		PMD_INIT_LOG(INFO, "MAC address cannot be changed when"
279 				  " the port is enabled");
280 		return -EBUSY;
281 	}
282 
283 	/* Writing new MAC to the specific port BAR address */
284 	nfp_net_write_mac(hw, (uint8_t *)mac_addr);
285 
286 	/* Signal the NIC about the change */
287 	update = NFP_NET_CFG_UPDATE_MACADDR;
288 	ctrl = hw->ctrl;
289 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
290 	    (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
291 		ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
292 	if (nfp_net_reconfig(hw, ctrl, update) < 0) {
293 		PMD_INIT_LOG(INFO, "MAC address update failed");
294 		return -EIO;
295 	}
296 	return 0;
297 }
298 
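/*
 * Map RX queues to interrupt vectors. With UIO only one queue and no LSC
 * are supported, so queue 0 uses vector 0. With VFIO, MSI-X entry 0 is
 * left for non-EFD (LSC) interrupts and queue i is mapped to entry i + 1.
 */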
299 int
300 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
301 			   struct rte_intr_handle *intr_handle)
302 {
303 	struct nfp_net_hw *hw;
304 	int i;
305 
306 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
307 				    dev->data->nb_rx_queues)) {
308 		PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
309 			     " intr_vec", dev->data->nb_rx_queues);
310 		return -ENOMEM;
311 	}
312 
313 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
314 
315 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
316 		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
317 		/* UIO supports only one queue and no LSC */
318 		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
319 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0))
320 			return -1;
321 	} else {
322 		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
323 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
324 			/*
325 			 * The first MSI-X vector is reserved for non-EFD
326 			 * (LSC) interrupts.
327 			 */
328 			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
329 			if (rte_intr_vec_list_index_set(intr_handle, i,
330 							       i + 1))
331 				return -1;
332 			PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
333 				rte_intr_vec_list_index_get(intr_handle,
334 								   i));
335 		}
336 	}
337 
338 	/* Avoiding TX interrupts */
339 	hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
340 	return 0;
341 }
342 
343 uint32_t
344 nfp_check_offloads(struct rte_eth_dev *dev)
345 {
346 	struct nfp_net_hw *hw;
347 	struct rte_eth_conf *dev_conf;
348 	struct rte_eth_rxmode *rxmode;
349 	struct rte_eth_txmode *txmode;
350 	uint32_t ctrl = 0;
351 
352 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
353 
354 	dev_conf = &dev->data->dev_conf;
355 	rxmode = &dev_conf->rxmode;
356 	txmode = &dev_conf->txmode;
357 
358 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
359 		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
360 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
361 	}
362 
363 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
364 		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
365 			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
366 	}
367 
368 	hw->mtu = dev->data->mtu;
369 
370 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
371 		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
372 
373 	/* L2 broadcast */
374 	if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
375 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
376 
377 	/* L2 multicast */
378 	if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
379 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
380 
381 	/* TX checksum offload */
382 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
383 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
384 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
385 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
386 
387 	/* LSO offload */
388 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
389 		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
390 			ctrl |= NFP_NET_CFG_CTRL_LSO;
391 		else
392 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
393 	}
394 
395 	/* TX gather (multi-segment frames) */
396 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
397 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
398 
399 	return ctrl;
400 }
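
/*
 * Illustrative only: the port start path (outside this file) is expected
 * to feed the returned capability word into a reconfig, along the lines of
 *
 *	new_ctrl = nfp_check_offloads(dev);
 *	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 *	if (nfp_net_reconfig(hw, new_ctrl, update) == 0)
 *		hw->ctrl = new_ctrl;
 */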
401 
402 int
403 nfp_net_promisc_enable(struct rte_eth_dev *dev)
404 {
405 	uint32_t new_ctrl, update = 0;
406 	struct nfp_net_hw *hw;
407 	int ret;
408 
409 	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
410 
411 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
412 
413 	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
414 		PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
415 		return -ENOTSUP;
416 	}
417 
418 	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
419 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
420 		return 0;
421 	}
422 
423 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
424 	update = NFP_NET_CFG_UPDATE_GEN;
425 
426 	/*
427 	 * DPDK sets promiscuous mode on just after this call, assuming
428 	 * it cannot fail ...
429 	 */
430 	ret = nfp_net_reconfig(hw, new_ctrl, update);
431 	if (ret < 0)
432 		return ret;
433 
434 	hw->ctrl = new_ctrl;
435 
436 	return 0;
437 }
438 
439 int
440 nfp_net_promisc_disable(struct rte_eth_dev *dev)
441 {
442 	uint32_t new_ctrl, update = 0;
443 	struct nfp_net_hw *hw;
444 	int ret;
445 
446 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
447 
448 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
449 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
450 		return 0;
451 	}
452 
453 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
454 	update = NFP_NET_CFG_UPDATE_GEN;
455 
456 	/*
457 	 * DPDK sets promiscuous mode off just before this call,
458 	 * assuming it cannot fail ...
459 	 */
460 	ret = nfp_net_reconfig(hw, new_ctrl, update);
461 	if (ret < 0)
462 		return ret;
463 
464 	hw->ctrl = new_ctrl;
465 
466 	return 0;
467 }
468 
469 /*
470  * return 0 means link status changed, -1 means not changed
471  *
472  * Wait to complete is needed as it can take up to 9 seconds to get the Link
473  * status.
474  */
475 int
476 nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
477 {
478 	struct nfp_net_hw *hw;
479 	struct rte_eth_link link;
480 	uint32_t nn_link_status;
481 	int ret;
482 
483 	static const uint32_t ls_to_ethtool[] = {
484 		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
485 		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
486 		[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
487 		[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
488 		[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
489 		[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
490 		[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
491 		[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
492 	};
493 
494 	PMD_DRV_LOG(DEBUG, "Link update");
495 
496 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
497 
498 	nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
499 
500 	memset(&link, 0, sizeof(struct rte_eth_link));
501 
502 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
503 		link.link_status = RTE_ETH_LINK_UP;
504 
505 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
506 
507 	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
508 			 NFP_NET_CFG_STS_LINK_RATE_MASK;
509 
510 	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
511 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
512 	else
513 		link.link_speed = ls_to_ethtool[nn_link_status];
514 
515 	ret = rte_eth_linkstatus_set(dev, &link);
516 	if (ret == 0) {
517 		if (link.link_status)
518 			PMD_DRV_LOG(INFO, "NIC Link is Up");
519 		else
520 			PMD_DRV_LOG(INFO, "NIC Link is Down");
521 	}
522 	return ret;
523 }
524 
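/*
 * The hardware counters are free-running; the values reported here are
 * deltas against the snapshot stored in hw->eth_stats_base, which
 * nfp_net_stats_reset() refreshes.
 */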
525 int
526 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
527 {
528 	int i;
529 	struct nfp_net_hw *hw;
530 	struct rte_eth_stats nfp_dev_stats;
531 
532 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
533 
534 	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
535 
536 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
537 
538 	/* reading per RX ring stats */
539 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
540 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
541 			break;
542 
543 		nfp_dev_stats.q_ipackets[i] =
544 			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
545 
546 		nfp_dev_stats.q_ipackets[i] -=
547 			hw->eth_stats_base.q_ipackets[i];
548 
549 		nfp_dev_stats.q_ibytes[i] =
550 			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
551 
552 		nfp_dev_stats.q_ibytes[i] -=
553 			hw->eth_stats_base.q_ibytes[i];
554 	}
555 
556 	/* reading per TX ring stats */
557 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
558 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
559 			break;
560 
561 		nfp_dev_stats.q_opackets[i] =
562 			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
563 
564 		nfp_dev_stats.q_opackets[i] -=
565 			hw->eth_stats_base.q_opackets[i];
566 
567 		nfp_dev_stats.q_obytes[i] =
568 			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
569 
570 		nfp_dev_stats.q_obytes[i] -=
571 			hw->eth_stats_base.q_obytes[i];
572 	}
573 
574 	nfp_dev_stats.ipackets =
575 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
576 
577 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
578 
579 	nfp_dev_stats.ibytes =
580 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
581 
582 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
583 
584 	nfp_dev_stats.opackets =
585 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
586 
587 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
588 
589 	nfp_dev_stats.obytes =
590 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
591 
592 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
593 
594 	/* reading general device stats */
595 	nfp_dev_stats.ierrors =
596 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
597 
598 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
599 
600 	nfp_dev_stats.oerrors =
601 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
602 
603 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
604 
605 	/* RX ring mbuf allocation failures */
606 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
607 
608 	nfp_dev_stats.imissed =
609 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
610 
611 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
612 
613 	if (stats) {
614 		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
615 		return 0;
616 	}
617 	return -EINVAL;
618 }
619 
620 int
621 nfp_net_stats_reset(struct rte_eth_dev *dev)
622 {
623 	int i;
624 	struct nfp_net_hw *hw;
625 
626 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
627 
628 	/*
629 	 * hw->eth_stats_base records the per-counter starting point.
630 	 * Let's update it now.
631 	 */
632 
633 	/* reading per RX ring stats */
634 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
635 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
636 			break;
637 
638 		hw->eth_stats_base.q_ipackets[i] =
639 			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
640 
641 		hw->eth_stats_base.q_ibytes[i] =
642 			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
643 	}
644 
645 	/* reading per TX ring stats */
646 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
647 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
648 			break;
649 
650 		hw->eth_stats_base.q_opackets[i] =
651 			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
652 
653 		hw->eth_stats_base.q_obytes[i] =
654 			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
655 	}
656 
657 	hw->eth_stats_base.ipackets =
658 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
659 
660 	hw->eth_stats_base.ibytes =
661 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
662 
663 	hw->eth_stats_base.opackets =
664 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
665 
666 	hw->eth_stats_base.obytes =
667 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
668 
669 	/* reading general device stats */
670 	hw->eth_stats_base.ierrors =
671 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
672 
673 	hw->eth_stats_base.oerrors =
674 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
675 
676 	/* RX ring mbuf allocation failures */
677 	dev->data->rx_mbuf_alloc_failed = 0;
678 
679 	hw->eth_stats_base.imissed =
680 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
681 
682 	return 0;
683 }
684 
685 int
686 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
687 {
688 	struct nfp_net_hw *hw;
689 
690 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691 
692 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
693 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
694 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
695 	/*
696 	 * The maximum rx packet length (max_rx_pktlen) is set to the
697 	 * maximum supported frame size that the NFP can handle. This
698 	 * includes layer 2 headers, CRC and other metadata that can
699 	 * optionally be used.
700 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
701 	 * which was set by the firmware loaded onto the card.
702 	 */
703 	dev_info->max_rx_pktlen = NFP_FRAME_SIZE_MAX;
704 	dev_info->max_mtu = hw->max_mtu;
705 	/* Next should change when PF support is implemented */
706 	dev_info->max_mac_addrs = 1;
707 
708 	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
709 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
710 
711 	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
712 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
713 					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
714 					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
715 
716 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
717 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
718 
719 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
720 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
721 					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
722 					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
723 
724 	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
725 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
726 
727 	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
728 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
729 
730 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
731 		.rx_thresh = {
732 			.pthresh = DEFAULT_RX_PTHRESH,
733 			.hthresh = DEFAULT_RX_HTHRESH,
734 			.wthresh = DEFAULT_RX_WTHRESH,
735 		},
736 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
737 		.rx_drop_en = 0,
738 	};
739 
740 	dev_info->default_txconf = (struct rte_eth_txconf) {
741 		.tx_thresh = {
742 			.pthresh = DEFAULT_TX_PTHRESH,
743 			.hthresh = DEFAULT_TX_HTHRESH,
744 			.wthresh = DEFAULT_TX_WTHRESH,
745 		},
746 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
747 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
748 	};
749 
750 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
751 		.nb_max = NFP_NET_MAX_RX_DESC,
752 		.nb_min = NFP_NET_MIN_RX_DESC,
753 		.nb_align = NFP_ALIGN_RING_DESC,
754 	};
755 
756 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
757 		.nb_max = NFP_NET_MAX_TX_DESC,
758 		.nb_min = NFP_NET_MIN_TX_DESC,
759 		.nb_align = NFP_ALIGN_RING_DESC,
760 		.nb_seg_max = NFP_TX_MAX_SEG,
761 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
762 	};
763 
764 	if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
765 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
766 
767 		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
768 						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
769 						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
770 						   RTE_ETH_RSS_IPV6 |
771 						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
772 						   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
773 
774 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
775 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
776 	}
777 
778 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
779 			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
780 			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
781 
782 	return 0;
783 }
784 
785 const uint32_t *
786 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
787 {
788 	static const uint32_t ptypes[] = {
789 		/* refers to nfp_net_set_hash() */
790 		RTE_PTYPE_INNER_L3_IPV4,
791 		RTE_PTYPE_INNER_L3_IPV6,
792 		RTE_PTYPE_INNER_L3_IPV6_EXT,
793 		RTE_PTYPE_INNER_L4_MASK,
794 		RTE_PTYPE_UNKNOWN
795 	};
796 
797 	if (dev->rx_pkt_burst == nfp_net_recv_pkts)
798 		return ptypes;
799 	return NULL;
800 }
801 
802 int
803 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
804 {
805 	struct rte_pci_device *pci_dev;
806 	struct nfp_net_hw *hw;
807 	int base = 0;
808 
809 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
810 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
811 
812 	if (rte_intr_type_get(pci_dev->intr_handle) !=
813 							RTE_INTR_HANDLE_UIO)
814 		base = 1;
815 
816 	/* Make sure all updates are written before un-masking */
817 	rte_wmb();
818 	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
819 		      NFP_NET_CFG_ICR_UNMASKED);
820 	return 0;
821 }
822 
823 int
824 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
825 {
826 	struct rte_pci_device *pci_dev;
827 	struct nfp_net_hw *hw;
828 	int base = 0;
829 
830 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
831 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
832 
833 	if (rte_intr_type_get(pci_dev->intr_handle) !=
834 							RTE_INTR_HANDLE_UIO)
835 		base = 1;
836 
837 	/* Make sure all updates are written before un-masking */
838 	rte_wmb();
839 	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
840 	return 0;
841 }
842 
843 static void
844 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
845 {
846 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
847 	struct rte_eth_link link;
848 
849 	rte_eth_linkstatus_get(dev, &link);
850 	if (link.link_status)
851 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
852 			    dev->data->port_id, link.link_speed,
853 			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
854 			    ? "full-duplex" : "half-duplex");
855 	else
856 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
857 			    dev->data->port_id);
858 
859 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
860 		    pci_dev->addr.domain, pci_dev->addr.bus,
861 		    pci_dev->addr.devid, pci_dev->addr.function);
862 }
863 
864 /* Interrupt configuration and handling */
865 
866 /*
867  * nfp_net_irq_unmask - Unmask an interrupt
868  *
869  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
870  * clear the ICR for the entry.
871  */
872 static void
873 nfp_net_irq_unmask(struct rte_eth_dev *dev)
874 {
875 	struct nfp_net_hw *hw;
876 	struct rte_pci_device *pci_dev;
877 
878 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
879 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
880 
881 	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
882 		/* If MSI-X auto-masking is used, clear the entry */
883 		rte_wmb();
884 		rte_intr_ack(pci_dev->intr_handle);
885 	} else {
886 		/* Make sure all updates are written before un-masking */
887 		rte_wmb();
888 		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
889 			      NFP_NET_CFG_ICR_UNMASKED);
890 	}
891 }
892 
893 /*
894  * Interrupt handler registered as an alarm callback for delayed handling
895  * of a specific interrupt, waiting for the NIC state to become stable.
896  * As the NFP interrupt state is not stable right after the link goes
897  * down, the handler waits up to 4 seconds before reading the status.
898  *
899  * @param handle   Pointer to interrupt handle.
900  * @param param    The address of parameter (struct rte_eth_dev *)
901  *
902  * @return  void
903  */
904 void
905 nfp_net_dev_interrupt_delayed_handler(void *param)
906 {
907 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
908 
909 	nfp_net_link_update(dev, 0);
910 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
911 
912 	nfp_net_dev_link_status_print(dev);
913 
914 	/* Unmasking */
915 	nfp_net_irq_unmask(dev);
916 }
917 
918 void
919 nfp_net_dev_interrupt_handler(void *param)
920 {
921 	int64_t timeout;
922 	struct rte_eth_link link;
923 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
924 
925 	PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
926 
927 	rte_eth_linkstatus_get(dev, &link);
928 
929 	nfp_net_link_update(dev, 0);
930 
931 	/* Link was down: likely coming up */
932 	if (!link.link_status) {
933 		/* Handle it 1 second later, waiting for the link to stabilize */
934 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
935 		/* Link was up: likely going down */
936 	} else {
937 		/* Handle it 4 seconds later, waiting for the link to stabilize */
938 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
939 	}
940 
941 	if (rte_eal_alarm_set(timeout * 1000,
942 			      nfp_net_dev_interrupt_delayed_handler,
943 			      (void *)dev) < 0) {
944 		PMD_INIT_LOG(ERR, "Error setting alarm");
945 		/* Unmasking */
946 		nfp_net_irq_unmask(dev);
947 	}
948 }
949 
950 int
951 nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
952 {
953 	struct nfp_net_hw *hw;
954 
955 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
956 
957 	/* MTU change is forbidden while the port is started */
958 	if (dev->data->dev_started) {
959 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
960 			    dev->data->port_id);
961 		return -EBUSY;
962 	}
963 
964 	/* writing to configuration space */
965 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
966 
967 	hw->mtu = mtu;
968 
969 	return 0;
970 }
971 
972 int
973 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
974 {
975 	uint32_t new_ctrl, update;
976 	struct nfp_net_hw *hw;
977 	int ret;
978 
979 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
980 	new_ctrl = 0;
981 
982 	/* Enable vlan strip if it is not configured yet */
983 	if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
984 	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
985 		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
986 
987 	/* Disable VLAN strip only if it is currently enabled */
988 	if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
989 	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
990 		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
991 
992 	if (new_ctrl == 0)
993 		return 0;
994 
995 	update = NFP_NET_CFG_UPDATE_GEN;
996 
997 	ret = nfp_net_reconfig(hw, new_ctrl, update);
998 	if (!ret)
999 		hw->ctrl = new_ctrl;
1000 
1001 	return ret;
1002 }
1003 
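/*
 * Program the RSS redirection table. The 128 one-byte entries are packed
 * four per 32-bit register; each nibble of reta_conf[].mask selects which
 * of the four entries in a group is updated, so untouched bytes are
 * preserved with a read-modify-write.
 */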
1004 static int
1005 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1006 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1007 		    uint16_t reta_size)
1008 {
1009 	uint32_t reta, mask;
1010 	int i, j;
1011 	int idx, shift;
1012 	struct nfp_net_hw *hw =
1013 		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1014 
1015 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1016 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1017 			"(%d) doesn't match the one hardware can support "
1018 			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1019 		return -EINVAL;
1020 	}
1021 
1022 	/*
1023 	 * Update Redirection Table. There are 128 8bit-entries which can be
1024 	 * Update the Redirection Table. There are 128 8-bit entries which can
1025 	 * be managed as 32 32-bit entries.
1026 	for (i = 0; i < reta_size; i += 4) {
1027 		/* Handling 4 RSS entries per loop */
1028 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1029 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1030 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1031 
1032 		if (!mask)
1033 			continue;
1034 
1035 		reta = 0;
1036 		/* If all 4 entries are written, no need to read the register first */
1037 		if (mask != 0xF)
1038 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1039 
1040 		for (j = 0; j < 4; j++) {
1041 			if (!(mask & (0x1 << j)))
1042 				continue;
1043 			if (mask != 0xF)
1044 				/* Clearing the entry bits */
1045 				reta &= ~(0xFF << (8 * j));
1046 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1047 		}
1048 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
1049 			      reta);
1050 	}
1051 	return 0;
1052 }
1053 
1054 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
1055 int
1056 nfp_net_reta_update(struct rte_eth_dev *dev,
1057 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1058 		    uint16_t reta_size)
1059 {
1060 	struct nfp_net_hw *hw =
1061 		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1062 	uint32_t update;
1063 	int ret;
1064 
1065 	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1066 		return -EINVAL;
1067 
1068 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1069 	if (ret != 0)
1070 		return ret;
1071 
1072 	update = NFP_NET_CFG_UPDATE_RSS;
1073 
1074 	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1075 		return -EIO;
1076 
1077 	return 0;
1078 }
1079 
1080  /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
1081 int
1082 nfp_net_reta_query(struct rte_eth_dev *dev,
1083 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1084 		   uint16_t reta_size)
1085 {
1086 	uint8_t i, j, mask;
1087 	int idx, shift;
1088 	uint32_t reta;
1089 	struct nfp_net_hw *hw;
1090 
1091 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1092 
1093 	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1094 		return -EINVAL;
1095 
1096 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1097 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1098 			"(%d) doesn't match the one hardware can support "
1099 			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1100 		return -EINVAL;
1101 	}
1102 
1103 	/*
1104 	 * Read the Redirection Table. There are 128 8-bit entries which can
1105 	 * be managed as 32 32-bit entries.
1106 	 */
1107 	for (i = 0; i < reta_size; i += 4) {
1108 		/* Handling 4 RSS entries per loop */
1109 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1110 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1111 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1112 
1113 		if (!mask)
1114 			continue;
1115 
1116 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
1117 				    shift);
1118 		for (j = 0; j < 4; j++) {
1119 			if (!(mask & (0x1 << j)))
1120 				continue;
1121 			reta_conf[idx].reta[shift + j] =
1122 				(uint8_t)((reta >> (8 * j)) & 0xFF);
1123 		}
1124 	}
1125 	return 0;
1126 }
1127 
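/*
 * Program the RSS key and hash fields: the key is written byte by byte
 * into NFP_NET_CFG_RSS_KEY, the requested rss_hf bits are translated into
 * NFP RSS control flags, Toeplitz hashing is selected and the key length
 * is recorded in NFP_NET_CFG_RSS_KEY_SZ.
 */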
1128 static int
1129 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1130 			struct rte_eth_rss_conf *rss_conf)
1131 {
1132 	struct nfp_net_hw *hw;
1133 	uint64_t rss_hf;
1134 	uint32_t cfg_rss_ctrl = 0;
1135 	uint8_t key;
1136 	int i;
1137 
1138 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1139 
1140 	/* Write the RSS key byte by byte */
1141 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1142 		memcpy(&key, &rss_conf->rss_key[i], 1);
1143 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1144 	}
1145 
1146 	rss_hf = rss_conf->rss_hf;
1147 
1148 	if (rss_hf & RTE_ETH_RSS_IPV4)
1149 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1150 
1151 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1152 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1153 
1154 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1155 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1156 
1157 	if (rss_hf & RTE_ETH_RSS_IPV6)
1158 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1159 
1160 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1161 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1162 
1163 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1164 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1165 
1166 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1167 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1168 
1169 	/* configuring where to apply the RSS hash */
1170 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1171 
1172 	/* Writing the key size */
1173 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1174 
1175 	return 0;
1176 }
1177 
1178 int
1179 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1180 			struct rte_eth_rss_conf *rss_conf)
1181 {
1182 	uint32_t update;
1183 	uint64_t rss_hf;
1184 	struct nfp_net_hw *hw;
1185 
1186 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1187 
1188 	rss_hf = rss_conf->rss_hf;
1189 
1190 	/* Checking if RSS is enabled */
1191 	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
1192 		if (rss_hf != 0) { /* Enable RSS? */
1193 			PMD_DRV_LOG(ERR, "RSS unsupported");
1194 			return -EINVAL;
1195 		}
1196 		return 0; /* Nothing to do */
1197 	}
1198 
1199 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1200 		PMD_DRV_LOG(ERR, "hash key too long");
1201 		return -EINVAL;
1202 	}
1203 
1204 	nfp_net_rss_hash_write(dev, rss_conf);
1205 
1206 	update = NFP_NET_CFG_UPDATE_RSS;
1207 
1208 	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1209 		return -EIO;
1210 
1211 	return 0;
1212 }
1213 
1214 int
1215 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1216 			  struct rte_eth_rss_conf *rss_conf)
1217 {
1218 	uint64_t rss_hf;
1219 	uint32_t cfg_rss_ctrl;
1220 	uint8_t key;
1221 	int i;
1222 	struct nfp_net_hw *hw;
1223 
1224 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1225 
1226 	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1227 		return -EINVAL;
1228 
1229 	rss_hf = rss_conf->rss_hf;
1230 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1231 
1232 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
1233 		rss_hf |= RTE_ETH_RSS_IPV4;
1234 
1235 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
1236 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1237 
1238 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
1239 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1240 
1241 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
1242 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1243 
1244 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
1245 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1246 
1247 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
1248 		rss_hf |= RTE_ETH_RSS_IPV6;
1249 
1250 	/* Propagate current RSS hash functions to caller */
1251 	rss_conf->rss_hf = rss_hf;
1252 
1253 	/* Reading the key size */
1254 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1255 
1256 	/* Read the RSS key byte by byte */
1257 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1258 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1259 		memcpy(&rss_conf->rss_key[i], &key, 1);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
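/*
 * Build a default RSS configuration: spread the 128 redirection table
 * entries round-robin over the enabled RX queues and program the hash
 * parameters taken from the device configuration.
 */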
1265 int
1266 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1267 {
1268 	struct rte_eth_conf *dev_conf;
1269 	struct rte_eth_rss_conf rss_conf;
1270 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1271 	uint16_t rx_queues = dev->data->nb_rx_queues;
1272 	uint16_t queue;
1273 	int i, j, ret;
1274 
1275 	PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
1276 		rx_queues);
1277 
1278 	nfp_reta_conf[0].mask = ~0x0;
1279 	nfp_reta_conf[1].mask = ~0x0;
1280 
1281 	queue = 0;
1282 	for (i = 0; i < 0x40; i += 8) {
1283 		for (j = i; j < (i + 8); j++) {
1284 			nfp_reta_conf[0].reta[j] = queue;
1285 			nfp_reta_conf[1].reta[j] = queue++;
1286 			queue %= rx_queues;
1287 		}
1288 	}
1289 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1290 	if (ret != 0)
1291 		return ret;
1292 
1293 	dev_conf = &dev->data->dev_conf;
1294 	if (!dev_conf) {
1295 		PMD_DRV_LOG(INFO, "wrong rss conf");
1296 		return -EINVAL;
1297 	}
1298 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
1299 
1300 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
1301 
1302 	return ret;
1303 }
1304 
1305 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
1306 RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
1307 /*
1308  * Local variables:
1309  * c-file-style: "Linux"
1310  * indent-tabs-mode: t
1311  * End:
1312  */
1313