1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2013-2016 Intel Corporation
3  */
4 
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_string_fns.h>
10 #include <rte_dev.h>
11 #include <rte_spinlock.h>
12 #include <rte_kvargs.h>
13 #include <rte_vect.h>
14 
15 #include "fm10k.h"
16 #include "base/fm10k_api.h"
17 
18 /* Default delay to acquire mailbox lock */
19 #define FM10K_MBXLOCK_DELAY_US 20
20 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
21 
22 #define MAIN_VSI_POOL_NUMBER 0
23 
24 /* Max try times to acquire switch status */
25 #define MAX_QUERY_SWITCH_STATE_TIMES 10
26 /* Wait interval to get switch status */
27 #define WAIT_SWITCH_MSG_US    100000
28 /* A period of quiescence for switch */
29 #define FM10K_SWITCH_QUIESCE_US 100000
30 /* Number of chars per uint32 type */
31 #define CHARS_PER_UINT32 (sizeof(uint32_t))
32 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
33 
34 /* default 1:1 map from queue ID to interrupt vector ID */
35 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
36 
37 /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
38 #define MAX_LPORT_NUM    128
39 #define GLORT_FD_Q_BASE  0x40
40 #define GLORT_PF_MASK    0xFFC0
41 #define GLORT_FD_MASK    GLORT_PF_MASK
42 #define GLORT_FD_INDEX   GLORT_FD_Q_BASE
43 
44 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
45 static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
46 static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
47 static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
48 static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
49 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
50 static int
51 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
52 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
53 	const u8 *mac, bool add, uint32_t pool);
54 static void fm10k_tx_queue_release(void *queue);
55 static void fm10k_rx_queue_release(void *queue);
56 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
57 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
58 static int fm10k_check_ftag(struct rte_devargs *devargs);
59 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
60 
61 static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
62 			       struct rte_eth_dev_info *dev_info);
63 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
64 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
65 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
66 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
67 
68 struct fm10k_xstats_name_off {
69 	char name[RTE_ETH_XSTATS_NAME_SIZE];
70 	unsigned offset;
71 };
72 
73 static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
74 	{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
75 	{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
76 	{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
77 	{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
78 	{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
79 	{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
80 	{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
81 	{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
82 		nodesc_drop)},
83 };
84 
85 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
86 		sizeof(fm10k_hw_stats_strings[0]))
87 
88 static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
89 	{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
90 	{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
91 	{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
92 };
93 
94 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
95 		sizeof(fm10k_hw_stats_rx_q_strings[0]))
96 
97 static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
98 	{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
99 	{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
100 };
101 
102 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
103 		sizeof(fm10k_hw_stats_tx_q_strings[0]))
104 
105 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
106 		(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
107 static int
108 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
109 
110 static void
111 fm10k_mbx_initlock(struct fm10k_hw *hw)
112 {
113 	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
114 }
115 
116 static void
117 fm10k_mbx_lock(struct fm10k_hw *hw)
118 {
119 	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
120 		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
121 }
122 
123 static void
124 fm10k_mbx_unlock(struct fm10k_hw *hw)
125 {
126 	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
127 }
128 
129 /* Stubs needed for linkage when vPMD is disabled */
130 __rte_weak int
131 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
132 {
133 	return -1;
134 }
135 
136 __rte_weak uint16_t
137 fm10k_recv_pkts_vec(
138 	__rte_unused void *rx_queue,
139 	__rte_unused struct rte_mbuf **rx_pkts,
140 	__rte_unused uint16_t nb_pkts)
141 {
142 	return 0;
143 }
144 
145 __rte_weak uint16_t
146 fm10k_recv_scattered_pkts_vec(
147 		__rte_unused void *rx_queue,
148 		__rte_unused struct rte_mbuf **rx_pkts,
149 		__rte_unused uint16_t nb_pkts)
150 {
151 	return 0;
152 }
153 
154 __rte_weak int
155 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
156 
157 {
158 	return -1;
159 }
160 
161 __rte_weak void
162 fm10k_rx_queue_release_mbufs_vec(
163 		__rte_unused struct fm10k_rx_queue *rxq)
164 {
165 	return;
166 }
167 
168 __rte_weak void
169 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
170 {
171 	return;
172 }
173 
174 __rte_weak int
175 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
176 {
177 	return -1;
178 }
179 
180 __rte_weak uint16_t
181 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
182 			   __rte_unused struct rte_mbuf **tx_pkts,
183 			   __rte_unused uint16_t nb_pkts)
184 {
185 	return 0;
186 }
187 
188 /*
189  * reset queue to initial state, allocate software buffers used when starting
190  * device.
191  * return 0 on success
192  * return -ENOMEM if buffers cannot be allocated
193  * return -EINVAL if buffers do not satisfy alignment condition
194  */
195 static inline int
196 rx_queue_reset(struct fm10k_rx_queue *q)
197 {
198 	static const union fm10k_rx_desc zero = {{0} };
199 	uint64_t dma_addr;
200 	int i, diag;
201 	PMD_INIT_FUNC_TRACE();
202 
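	/* Allocate one mbuf per descriptor from the queue's mempool in a single
	 * bulk request; on failure the software ring is left untouched.
	 */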
203 	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
204 	if (diag != 0)
205 		return -ENOMEM;
206 
207 	for (i = 0; i < q->nb_desc; ++i) {
208 		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
209 		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
210 			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
211 						q->nb_desc);
212 			return -EINVAL;
213 		}
214 		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
215 		q->hw_ring[i].q.pkt_addr = dma_addr;
216 		q->hw_ring[i].q.hdr_addr = dma_addr;
217 	}
218 
219 	/* initialize extra software ring entries. Space for these extra
220 	 * entries is always allocated.
221 	 */
222 	memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
223 	for (i = 0; i < q->nb_fake_desc; ++i) {
224 		q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
225 		q->hw_ring[q->nb_desc + i] = zero;
226 	}
227 
228 	q->next_dd = 0;
229 	q->next_alloc = 0;
230 	q->next_trigger = q->alloc_thresh - 1;
231 	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
232 	q->rxrearm_start = 0;
233 	q->rxrearm_nb = 0;
234 
235 	return 0;
236 }
237 
238 /*
239  * clean queue, descriptor rings, free software buffers used when stopping
240  * device.
241  */
242 static inline void
243 rx_queue_clean(struct fm10k_rx_queue *q)
244 {
245 	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
246 	uint32_t i;
247 	PMD_INIT_FUNC_TRACE();
248 
249 	/* zero descriptor rings */
250 	for (i = 0; i < q->nb_desc; ++i)
251 		q->hw_ring[i] = zero;
252 
253 	/* zero faked descriptors */
254 	for (i = 0; i < q->nb_fake_desc; ++i)
255 		q->hw_ring[q->nb_desc + i] = zero;
256 
257 	/* vPMD driver has a different way of releasing mbufs. */
258 	if (q->rx_using_sse) {
259 		fm10k_rx_queue_release_mbufs_vec(q);
260 		return;
261 	}
262 
263 	/* free software buffers */
264 	for (i = 0; i < q->nb_desc; ++i) {
265 		if (q->sw_ring[i]) {
266 			rte_pktmbuf_free_seg(q->sw_ring[i]);
267 			q->sw_ring[i] = NULL;
268 		}
269 	}
270 }
271 
272 /*
273  * free all queue memory used when releasing the queue (i.e. configure)
274  */
275 static inline void
276 rx_queue_free(struct fm10k_rx_queue *q)
277 {
278 	PMD_INIT_FUNC_TRACE();
279 	if (q) {
280 		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
281 		rx_queue_clean(q);
282 		if (q->sw_ring) {
283 			rte_free(q->sw_ring);
284 			q->sw_ring = NULL;
285 		}
286 		rte_free(q);
287 		q = NULL;
288 	}
289 }
290 
291 /*
292  * disable RX queue, wait until HW finishes the necessary flush operation
293  */
294 static inline int
295 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
296 {
297 	uint32_t reg, i;
298 
299 	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
300 	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
301 			reg & ~FM10K_RXQCTL_ENABLE);
302 
303 	/* Wait 100us at most */
304 	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
305 		rte_delay_us(1);
306 		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
307 		if (!(reg & FM10K_RXQCTL_ENABLE))
308 			break;
309 	}
310 
311 	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
312 		return -1;
313 
314 	return 0;
315 }
316 
317 /*
318  * reset queue to initial state, allocate software buffers used when starting
319  * device
320  */
321 static inline void
322 tx_queue_reset(struct fm10k_tx_queue *q)
323 {
324 	PMD_INIT_FUNC_TRACE();
325 	q->last_free = 0;
326 	q->next_free = 0;
327 	q->nb_used = 0;
328 	q->nb_free = q->nb_desc - 1;
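	/* Reset the RS-bit tracker FIFO; it holds one entry per rs_thresh-sized
	 * group of descriptors in the ring.
	 */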
329 	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
330 	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
331 }
332 
333 /*
334  * clean queue, descriptor rings, free software buffers used when stopping
335  * device
336  */
337 static inline void
338 tx_queue_clean(struct fm10k_tx_queue *q)
339 {
340 	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
341 	uint32_t i;
342 	PMD_INIT_FUNC_TRACE();
343 
344 	/* zero descriptor rings */
345 	for (i = 0; i < q->nb_desc; ++i)
346 		q->hw_ring[i] = zero;
347 
348 	/* free software buffers */
349 	for (i = 0; i < q->nb_desc; ++i) {
350 		if (q->sw_ring[i]) {
351 			rte_pktmbuf_free_seg(q->sw_ring[i]);
352 			q->sw_ring[i] = NULL;
353 		}
354 	}
355 }
356 
357 /*
358  * free all queue memory used when releasing the queue (i.e. configure)
359  */
360 static inline void
361 tx_queue_free(struct fm10k_tx_queue *q)
362 {
363 	PMD_INIT_FUNC_TRACE();
364 	if (q) {
365 		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
366 		tx_queue_clean(q);
367 		if (q->rs_tracker.list) {
368 			rte_free(q->rs_tracker.list);
369 			q->rs_tracker.list = NULL;
370 		}
371 		if (q->sw_ring) {
372 			rte_free(q->sw_ring);
373 			q->sw_ring = NULL;
374 		}
375 		rte_free(q);
376 		q = NULL;
377 	}
378 }
379 
380 /*
381  * disable TX queue, wait until HW finishes the necessary flush operation
382  */
383 static inline int
384 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
385 {
386 	uint32_t reg, i;
387 
388 	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
389 	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
390 			reg & ~FM10K_TXDCTL_ENABLE);
391 
392 	/* Wait 100us at most */
393 	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
394 		rte_delay_us(1);
395 		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
396 		if (!(reg & FM10K_TXDCTL_ENABLE))
397 			break;
398 	}
399 
400 	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
401 		return -1;
402 
403 	return 0;
404 }
405 
406 static int
407 fm10k_check_mq_mode(struct rte_eth_dev *dev)
408 {
409 	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
410 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
411 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
412 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
413 
414 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
415 
416 	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
417 		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
418 		return -EINVAL;
419 	}
420 
421 	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
422 		return 0;
423 
424 	if (hw->mac.type == fm10k_mac_vf) {
425 		PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
426 		return -EINVAL;
427 	}
428 
429 	/* Check VMDQ queue pool number */
430 	if (vmdq_conf->nb_queue_pools >
431 			sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
432 			vmdq_conf->nb_queue_pools > nb_rx_q) {
433 		PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
434 			vmdq_conf->nb_queue_pools);
435 		return -EINVAL;
436 	}
437 
438 	return 0;
439 }
440 
441 static const struct fm10k_txq_ops def_txq_ops = {
442 	.reset = tx_queue_reset,
443 };
444 
445 static int
446 fm10k_dev_configure(struct rte_eth_dev *dev)
447 {
448 	int ret;
449 
450 	PMD_INIT_FUNC_TRACE();
451 
452 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
453 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
454 
455 	/* multiple queue mode checking */
456 	ret  = fm10k_check_mq_mode(dev);
457 	if (ret != 0) {
458 		PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
459 			    ret);
460 		return ret;
461 	}
462 
463 	dev->data->scattered_rx = 0;
464 
465 	return 0;
466 }
467 
468 static void
469 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
470 {
471 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
472 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
473 	uint32_t i;
474 
475 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
476 
477 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
478 		if (!vmdq_conf->pool_map[i].pools)
479 			continue;
480 		fm10k_mbx_lock(hw);
481 		fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
482 		fm10k_mbx_unlock(hw);
483 	}
484 }
485 
486 static void
487 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
488 {
489 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490 
491 	/* Add default mac address */
492 	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
493 		MAIN_VSI_POOL_NUMBER);
494 }
495 
496 static void
497 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
498 {
499 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
500 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
501 	uint32_t mrqc, *key, i, reta, j;
502 	uint64_t hf;
503 
504 #define RSS_KEY_SIZE 40
505 	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
506 		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
507 		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
508 		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
509 		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
510 		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
511 	};
512 
513 	if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
514 		dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
515 		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
516 		return;
517 	}
518 
519 	/* RSS key is rss_intel_key (default) or user-provided (rss_key) */
520 	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
521 		key = (uint32_t *)rss_intel_key;
522 	else
523 		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
524 
525 	/* Now fill our hash function seeds, 4 bytes at a time */
526 	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
527 		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
528 
529 	/*
530 	 * Fill in redirection table
531 	 * The byte-swap is needed because NIC registers are in
532 	 * little-endian order.
533 	 */
534 	reta = 0;
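	/* Each 32-bit RETA register packs four 8-bit queue indices; the queue
	 * index j wraps around the configured number of Rx queues.
	 */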
535 	for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
536 		if (j == dev->data->nb_rx_queues)
537 			j = 0;
538 		reta = (reta << CHAR_BIT) | j;
539 		if ((i & 3) == 3)
540 			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
541 					rte_bswap32(reta));
542 	}
543 
544 	/*
545 	 * Generate RSS hash based on packet types, TCP/UDP
546 	 * port numbers and/or IPv4/v6 src and dst addresses
547 	 */
548 	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
549 	mrqc = 0;
550 	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
551 	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
552 	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
553 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
554 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
555 	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
556 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
557 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
558 	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
559 
560 	if (mrqc == 0) {
561 		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
562 			"supported", hf);
563 		return;
564 	}
565 
566 	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
567 }
568 
569 static void
570 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
571 {
572 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
573 	uint32_t i;
574 
575 	for (i = 0; i < nb_lport_new; i++) {
576 		/* Set unicast mode by default. The application can change
577 		 * this to another mode via other API functions.
578 		 */
579 		fm10k_mbx_lock(hw);
580 		hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
581 			FM10K_XCAST_MODE_NONE);
582 		fm10k_mbx_unlock(hw);
583 	}
584 }
585 
586 static void
587 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
588 {
589 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
590 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
591 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
592 	struct fm10k_macvlan_filter_info *macvlan;
593 	uint16_t nb_queue_pools = 0; /* pool number in configuration */
594 	uint16_t nb_lport_new;
595 
596 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
597 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
598 
599 	fm10k_dev_rss_configure(dev);
600 
601 	/* only PF supports VMDQ */
602 	if (hw->mac.type != fm10k_mac_pf)
603 		return;
604 
605 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
606 		nb_queue_pools = vmdq_conf->nb_queue_pools;
607 
608 	/* no pool number change, no need to update logic port and VLAN/MAC */
609 	if (macvlan->nb_queue_pools == nb_queue_pools)
610 		return;
611 
612 	nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
613 	fm10k_dev_logic_port_update(dev, nb_lport_new);
614 
615 	/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
616 	memset(dev->data->mac_addrs, 0,
617 		RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
618 	rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
619 		&dev->data->mac_addrs[0]);
620 	memset(macvlan, 0, sizeof(*macvlan));
621 	macvlan->nb_queue_pools = nb_queue_pools;
622 
623 	if (nb_queue_pools)
624 		fm10k_dev_vmdq_rx_configure(dev);
625 	else
626 		fm10k_dev_pf_main_vsi_reset(dev);
627 }
628 
629 static int
630 fm10k_dev_tx_init(struct rte_eth_dev *dev)
631 {
632 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
633 	int i, ret;
634 	struct fm10k_tx_queue *txq;
635 	uint64_t base_addr;
636 	uint32_t size;
637 
638 	/* Disable TXINT to avoid possible interrupt */
639 	for (i = 0; i < hw->mac.max_queues; i++)
640 		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
641 				3 << FM10K_TXINT_TIMER_SHIFT);
642 
643 	/* Setup TX queue */
644 	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
645 		txq = dev->data->tx_queues[i];
646 		base_addr = txq->hw_ring_phys_addr;
647 		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
648 
649 		/* disable queue to avoid issues while updating state */
650 		ret = tx_queue_disable(hw, i);
651 		if (ret) {
652 			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
653 			return -1;
654 		}
655 		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
656 		 * register is read-only for VF.
657 		 */
658 		if (fm10k_check_ftag(dev->device->devargs)) {
659 			if (hw->mac.type == fm10k_mac_pf) {
660 				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
661 						FM10K_PFVTCTL_FTAG_DESC_ENABLE);
662 				PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
663 			} else {
664 				PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
665 				return -ENOTSUP;
666 			}
667 		}
668 
669 		/* set location and size for descriptor ring */
670 		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
671 				base_addr & UINT64_LOWER_32BITS_MASK);
672 		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
673 				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
674 		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
675 
676 		/* assign default SGLORT for each TX queue by PF */
677 		if (hw->mac.type == fm10k_mac_pf)
678 			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
679 	}
680 
681 	/* set up vector or scalar TX function as appropriate */
682 	fm10k_set_tx_function(dev);
683 
684 	return 0;
685 }
686 
687 static int
688 fm10k_dev_rx_init(struct rte_eth_dev *dev)
689 {
690 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691 	struct fm10k_macvlan_filter_info *macvlan;
692 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
693 	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
694 	int i, ret;
695 	struct fm10k_rx_queue *rxq;
696 	uint64_t base_addr;
697 	uint32_t size;
698 	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
699 	uint32_t logic_port = hw->mac.dglort_map;
700 	uint16_t buf_size;
701 	uint16_t queue_stride = 0;
702 
703 	/* enable RXINT for interrupt mode */
704 	i = 0;
705 	if (rte_intr_dp_is_en(intr_handle)) {
706 		for (; i < dev->data->nb_rx_queues; i++) {
707 			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
708 			if (hw->mac.type == fm10k_mac_pf)
709 				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
710 					FM10K_ITR_AUTOMASK |
711 					FM10K_ITR_MASK_CLEAR);
712 			else
713 				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
714 					FM10K_ITR_AUTOMASK |
715 					FM10K_ITR_MASK_CLEAR);
716 		}
717 	}
718 	/* Disable other RXINT to avoid possible interrupt */
719 	for (; i < hw->mac.max_queues; i++)
720 		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
721 			3 << FM10K_RXINT_TIMER_SHIFT);
722 
723 	/* Setup RX queues */
724 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
725 		rxq = dev->data->rx_queues[i];
726 		base_addr = rxq->hw_ring_phys_addr;
727 		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
728 
729 		/* disable queue to avoid issues while updating state */
730 		ret = rx_queue_disable(hw, i);
731 		if (ret) {
732 			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
733 			return -1;
734 		}
735 
736 		/* Setup the Base and Length of the Rx Descriptor Ring */
737 		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
738 				base_addr & UINT64_LOWER_32BITS_MASK);
739 		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
740 				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
741 		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
742 
743 		/* Configure the Rx buffer size for a single buffer without splitting */
744 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
745 			RTE_PKTMBUF_HEADROOM);
746 		/* As RX buffer is aligned to 512B within mbuf, some bytes are
747 		 * reserved for this purpose, and the worst case could be 511B.
748 		 * But SRR reg assumes all buffers have the same size. In order
749 		 * to fill the gap, we'll have to consider the worst case and
750 		 * assume 512B is reserved. If we don't do so, it's possible
751 		 * for HW to overwrite data to next mbuf.
752 		 */
753 		buf_size -= FM10K_RX_DATABUF_ALIGN;
754 
755 		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
756 				(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
757 				FM10K_SRRCTL_LOOPBACK_SUPPRESS);
758 
759 		/* Add room for two VLAN tags when comparing against the buffer size */
760 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
761 				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
762 			rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
763 			uint32_t reg;
764 			dev->data->scattered_rx = 1;
765 			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
766 			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
767 			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
768 		}
769 
770 		/* Enable drop on empty, it's RO for VF */
771 		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
772 			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
773 
774 		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
775 		FM10K_WRITE_FLUSH(hw);
776 	}
777 
778 	/* Configure VMDQ/RSS if applicable */
779 	fm10k_dev_mq_rx_configure(dev);
780 
781 	/* Decide the best RX function */
782 	fm10k_set_rx_function(dev);
783 
784 	/* update RX_SGLORT for loopback suppression */
785 	if (hw->mac.type != fm10k_mac_pf)
786 		return 0;
787 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
788 	if (macvlan->nb_queue_pools)
789 		queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
790 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
791 		if (i && queue_stride && !(i % queue_stride))
792 			logic_port++;
793 		FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
794 	}
795 
796 	return 0;
797 }
798 
799 static int
800 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
801 {
802 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
803 	int err;
804 	uint32_t reg;
805 	struct fm10k_rx_queue *rxq;
806 
807 	PMD_INIT_FUNC_TRACE();
808 
809 	rxq = dev->data->rx_queues[rx_queue_id];
810 	err = rx_queue_reset(rxq);
811 	if (err == -ENOMEM) {
812 		PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
813 		return err;
814 	} else if (err == -EINVAL) {
815 		PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
816 			" %d", err);
817 		return err;
818 	}
819 
820 	/* Setup the HW Rx Head and Tail Descriptor Pointers
821 	 * Note: this must be done AFTER the queue is enabled on real
822 	 * hardware, but BEFORE the queue is enabled when using the
823 	 * emulation platform. Do it in both places for now and remove
824 	 * this comment and the following two register writes when the
825 	 * emulation platform is no longer being used.
826 	 */
827 	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
828 	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
829 
830 	/* Set PF ownership flag for PF devices */
831 	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
832 	if (hw->mac.type == fm10k_mac_pf)
833 		reg |= FM10K_RXQCTL_PF;
834 	reg |= FM10K_RXQCTL_ENABLE;
835 	/* enable RX queue */
836 	FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
837 	FM10K_WRITE_FLUSH(hw);
838 
839 	/* Setup the HW Rx Head and Tail Descriptor Pointers
840 	 * Note: this must be done AFTER the queue is enabled
841 	 */
842 	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
843 	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
844 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
845 
846 	return 0;
847 }
848 
849 static int
850 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
851 {
852 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
853 
854 	PMD_INIT_FUNC_TRACE();
855 
856 	/* Disable RX queue */
857 	rx_queue_disable(hw, rx_queue_id);
858 
859 	/* Free mbuf and clean HW ring */
860 	rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
861 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
862 
863 	return 0;
864 }
865 
866 static int
867 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
868 {
869 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
870 	/** @todo - this should be defined in the shared code */
871 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
872 	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
873 	struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
874 
875 	PMD_INIT_FUNC_TRACE();
876 
877 	q->ops->reset(q);
878 
879 	/* reset head and tail pointers */
880 	FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
881 	FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
882 
883 	/* enable TX queue */
884 	FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
885 				FM10K_TXDCTL_ENABLE | txdctl);
886 	FM10K_WRITE_FLUSH(hw);
887 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
888 
889 	return 0;
890 }
891 
892 static int
893 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
894 {
895 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
896 
897 	PMD_INIT_FUNC_TRACE();
898 
899 	tx_queue_disable(hw, tx_queue_id);
900 	tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
901 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
902 
903 	return 0;
904 }
905 
906 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
907 {
908 	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
909 		!= FM10K_DGLORTMAP_NONE);
910 }
911 
912 static int
913 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
914 {
915 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
916 	int status;
917 
918 	PMD_INIT_FUNC_TRACE();
919 
920 	/* Return if it didn't acquire valid glort range */
921 	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
922 		return 0;
923 
924 	fm10k_mbx_lock(hw);
925 	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
926 				FM10K_XCAST_MODE_PROMISC);
927 	fm10k_mbx_unlock(hw);
928 
929 	if (status != FM10K_SUCCESS) {
930 		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
931 		return -EAGAIN;
932 	}
933 
934 	return 0;
935 }
936 
937 static int
938 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
939 {
940 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
941 	uint8_t mode;
942 	int status;
943 
944 	PMD_INIT_FUNC_TRACE();
945 
946 	/* Return if it didn't acquire valid glort range */
947 	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
948 		return 0;
949 
950 	if (dev->data->all_multicast == 1)
951 		mode = FM10K_XCAST_MODE_ALLMULTI;
952 	else
953 		mode = FM10K_XCAST_MODE_NONE;
954 
955 	fm10k_mbx_lock(hw);
956 	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
957 				mode);
958 	fm10k_mbx_unlock(hw);
959 
960 	if (status != FM10K_SUCCESS) {
961 		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
962 		return -EAGAIN;
963 	}
964 
965 	return 0;
966 }
967 
968 static int
969 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
970 {
971 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
972 	int status;
973 
974 	PMD_INIT_FUNC_TRACE();
975 
976 	/* Return if it didn't acquire valid glort range */
977 	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
978 		return 0;
979 
980 	/* If promiscuous mode is enabled, it doesn't make sense to enable
981 	 * allmulticast and disable promiscuous since fm10k only can select
982 	 * one of the modes.
983 	 */
984 	if (dev->data->promiscuous) {
985 		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
986 			"needn't enable allmulticast");
987 		return 0;
988 	}
989 
990 	fm10k_mbx_lock(hw);
991 	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
992 				FM10K_XCAST_MODE_ALLMULTI);
993 	fm10k_mbx_unlock(hw);
994 
995 	if (status != FM10K_SUCCESS) {
996 		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
997 		return -EAGAIN;
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 static int
1004 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1005 {
1006 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1007 	int status;
1008 
1009 	PMD_INIT_FUNC_TRACE();
1010 
1011 	/* Return if it didn't acquire valid glort range */
1012 	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1013 		return 0;
1014 
1015 	if (dev->data->promiscuous) {
1016 		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1017 			"since promisc mode is enabled");
1018 		return -EINVAL;
1019 	}
1020 
1021 	fm10k_mbx_lock(hw);
1022 	/* Change mode to unicast mode */
1023 	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1024 				FM10K_XCAST_MODE_NONE);
1025 	fm10k_mbx_unlock(hw);
1026 
1027 	if (status != FM10K_SUCCESS) {
1028 		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1029 		return -EAGAIN;
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static void
1036 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1037 {
1038 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1039 	uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1040 	uint16_t nb_queue_pools;
1041 	struct fm10k_macvlan_filter_info *macvlan;
1042 
1043 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1044 	nb_queue_pools = macvlan->nb_queue_pools;
1045 	pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
1046 	rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
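	/* pool_len and rss_len are bit-field widths fed to the DGLORT decoder:
	 * pool_len covers the VMDQ pool index, rss_len the RSS queue index
	 * within a pool (this split is an interpretation of the decoder fields).
	 */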
1047 
1048 	/* GLORT 0x0-0x3F are used by PF and VMDQ,  0x40-0x7F used by FD */
1049 	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1050 	dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1051 			hw->mac.dglort_map;
1052 	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1053 	/* Configure VMDQ/RSS DGlort Decoder */
1054 	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1055 
1056 	/* Flow Director configurations, only queue number is valid. */
1057 	dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
1058 	dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1059 			(hw->mac.dglort_map + GLORT_FD_Q_BASE);
1060 	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1061 	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1062 
1063 	/* Invalidate all other GLORT entries */
1064 	for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1065 		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1066 				FM10K_DGLORTMAP_NONE);
1067 }
1068 
1069 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1070 static int
1071 fm10k_dev_start(struct rte_eth_dev *dev)
1072 {
1073 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074 	int i, diag;
1075 
1076 	PMD_INIT_FUNC_TRACE();
1077 
1078 	/* stop, init, then start the hw */
1079 	diag = fm10k_stop_hw(hw);
1080 	if (diag != FM10K_SUCCESS) {
1081 		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1082 		return -EIO;
1083 	}
1084 
1085 	diag = fm10k_init_hw(hw);
1086 	if (diag != FM10K_SUCCESS) {
1087 		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1088 		return -EIO;
1089 	}
1090 
1091 	diag = fm10k_start_hw(hw);
1092 	if (diag != FM10K_SUCCESS) {
1093 		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1094 		return -EIO;
1095 	}
1096 
1097 	diag = fm10k_dev_tx_init(dev);
1098 	if (diag) {
1099 		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1100 		return diag;
1101 	}
1102 
1103 	if (fm10k_dev_rxq_interrupt_setup(dev))
1104 		return -EIO;
1105 
1106 	diag = fm10k_dev_rx_init(dev);
1107 	if (diag) {
1108 		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1109 		return diag;
1110 	}
1111 
1112 	if (hw->mac.type == fm10k_mac_pf)
1113 		fm10k_dev_dglort_map_configure(dev);
1114 
1115 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1116 		struct fm10k_rx_queue *rxq;
1117 		rxq = dev->data->rx_queues[i];
1118 
1119 		if (rxq->rx_deferred_start)
1120 			continue;
1121 		diag = fm10k_dev_rx_queue_start(dev, i);
1122 		if (diag != 0) {
1123 			int j;
1124 			for (j = 0; j < i; ++j)
1125 				rx_queue_clean(dev->data->rx_queues[j]);
1126 			return diag;
1127 		}
1128 	}
1129 
1130 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1131 		struct fm10k_tx_queue *txq;
1132 		txq = dev->data->tx_queues[i];
1133 
1134 		if (txq->tx_deferred_start)
1135 			continue;
1136 		diag = fm10k_dev_tx_queue_start(dev, i);
1137 		if (diag != 0) {
1138 			int j;
1139 			for (j = 0; j < i; ++j)
1140 				tx_queue_clean(dev->data->tx_queues[j]);
1141 			for (j = 0; j < dev->data->nb_rx_queues; ++j)
1142 				rx_queue_clean(dev->data->rx_queues[j]);
1143 			return diag;
1144 		}
1145 	}
1146 
1147 	/* Update default vlan when not in VMDQ mode */
1148 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1149 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1150 
1151 	fm10k_link_update(dev, 0);
1152 
1153 	return 0;
1154 }
1155 
1156 static int
1157 fm10k_dev_stop(struct rte_eth_dev *dev)
1158 {
1159 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1160 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1161 	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1162 	int i;
1163 
1164 	PMD_INIT_FUNC_TRACE();
1165 	dev->data->dev_started = 0;
1166 
1167 	if (dev->data->tx_queues)
1168 		for (i = 0; i < dev->data->nb_tx_queues; i++)
1169 			fm10k_dev_tx_queue_stop(dev, i);
1170 
1171 	if (dev->data->rx_queues)
1172 		for (i = 0; i < dev->data->nb_rx_queues; i++)
1173 			fm10k_dev_rx_queue_stop(dev, i);
1174 
1175 	/* Disable datapath event */
1176 	if (rte_intr_dp_is_en(intr_handle)) {
1177 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
1178 			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1179 				3 << FM10K_RXINT_TIMER_SHIFT);
1180 			if (hw->mac.type == fm10k_mac_pf)
1181 				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1182 					FM10K_ITR_MASK_SET);
1183 			else
1184 				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1185 					FM10K_ITR_MASK_SET);
1186 		}
1187 	}
1188 	/* Clean datapath event and queue/vec mapping */
1189 	rte_intr_efd_disable(intr_handle);
1190 	rte_free(intr_handle->intr_vec);
1191 	intr_handle->intr_vec = NULL;
1192 
1193 	return 0;
1194 }
1195 
1196 static void
1197 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1198 {
1199 	int i;
1200 
1201 	PMD_INIT_FUNC_TRACE();
1202 
1203 	if (dev->data->tx_queues) {
1204 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
1205 			struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1206 
1207 			tx_queue_free(txq);
1208 		}
1209 	}
1210 
1211 	if (dev->data->rx_queues) {
1212 		for (i = 0; i < dev->data->nb_rx_queues; i++)
1213 			fm10k_rx_queue_release(dev->data->rx_queues[i]);
1214 	}
1215 }
1216 
1217 static int
1218 fm10k_link_update(struct rte_eth_dev *dev,
1219 	__rte_unused int wait_to_complete)
1220 {
1221 	struct fm10k_dev_info *dev_info =
1222 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1223 	PMD_INIT_FUNC_TRACE();
1224 
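	/* The link is reported as a fixed 50G full-duplex connection; only the
	 * up/down status is derived from the switch-manager state (sm_down).
	 */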
1225 	dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
1226 	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1227 	dev->data->dev_link.link_status =
1228 		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1229 	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
1230 
1231 	return 0;
1232 }
1233 
1234 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1235 	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1236 {
1237 	unsigned i, q;
1238 	unsigned count = 0;
1239 
1240 	if (xstats_names != NULL) {
1241 		/* Note: limit checked in rte_eth_xstats_names() */
1242 
1243 		/* Global stats */
1244 		for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1245 			snprintf(xstats_names[count].name,
1246 				sizeof(xstats_names[count].name),
1247 				"%s", fm10k_hw_stats_strings[count].name);
1248 			count++;
1249 		}
1250 
1251 		/* PF queue stats */
1252 		for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1253 			for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1254 				snprintf(xstats_names[count].name,
1255 					sizeof(xstats_names[count].name),
1256 					"rx_q%u_%s", q,
1257 					fm10k_hw_stats_rx_q_strings[i].name);
1258 				count++;
1259 			}
1260 			for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1261 				snprintf(xstats_names[count].name,
1262 					sizeof(xstats_names[count].name),
1263 					"tx_q%u_%s", q,
1264 					fm10k_hw_stats_tx_q_strings[i].name);
1265 				count++;
1266 			}
1267 		}
1268 	}
1269 	return FM10K_NB_XSTATS;
1270 }
1271 
1272 static int
1273 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1274 		 unsigned n)
1275 {
1276 	struct fm10k_hw_stats *hw_stats =
1277 		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1278 	unsigned i, q, count = 0;
1279 
1280 	if (n < FM10K_NB_XSTATS)
1281 		return FM10K_NB_XSTATS;
1282 
1283 	/* Global stats */
1284 	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1285 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1286 			fm10k_hw_stats_strings[count].offset);
1287 		xstats[count].id = count;
1288 		count++;
1289 	}
1290 
1291 	/* PF queue stats */
1292 	for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1293 		for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1294 			xstats[count].value =
1295 				*(uint64_t *)(((char *)&hw_stats->q[q]) +
1296 				fm10k_hw_stats_rx_q_strings[i].offset);
1297 			xstats[count].id = count;
1298 			count++;
1299 		}
1300 		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1301 			xstats[count].value =
1302 				*(uint64_t *)(((char *)&hw_stats->q[q]) +
1303 				fm10k_hw_stats_tx_q_strings[i].offset);
1304 			xstats[count].id = count;
1305 			count++;
1306 		}
1307 	}
1308 
1309 	return FM10K_NB_XSTATS;
1310 }
1311 
1312 static int
1313 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1314 {
1315 	uint64_t ipackets, opackets, ibytes, obytes, imissed;
1316 	struct fm10k_hw *hw =
1317 		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1318 	struct fm10k_hw_stats *hw_stats =
1319 		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1320 	int i;
1321 
1322 	PMD_INIT_FUNC_TRACE();
1323 
1324 	fm10k_update_hw_stats(hw, hw_stats);
1325 
1326 	ipackets = opackets = ibytes = obytes = imissed = 0;
1327 	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1328 		(i < hw->mac.max_queues); ++i) {
1329 		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1330 		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1331 		stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1332 		stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1333 		stats->q_errors[i]   = hw_stats->q[i].rx_drops.count;
1334 		ipackets += stats->q_ipackets[i];
1335 		opackets += stats->q_opackets[i];
1336 		ibytes   += stats->q_ibytes[i];
1337 		obytes   += stats->q_obytes[i];
1338 		imissed  += stats->q_errors[i];
1339 	}
1340 	stats->ipackets = ipackets;
1341 	stats->opackets = opackets;
1342 	stats->ibytes = ibytes;
1343 	stats->obytes = obytes;
1344 	stats->imissed = imissed;
1345 	return 0;
1346 }
1347 
1348 static int
1349 fm10k_stats_reset(struct rte_eth_dev *dev)
1350 {
1351 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1352 	struct fm10k_hw_stats *hw_stats =
1353 		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1354 
1355 	PMD_INIT_FUNC_TRACE();
1356 
1357 	memset(hw_stats, 0, sizeof(*hw_stats));
1358 	fm10k_rebind_hw_stats(hw, hw_stats);
1359 
1360 	return 0;
1361 }
1362 
1363 static int
1364 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1365 	struct rte_eth_dev_info *dev_info)
1366 {
1367 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1368 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1369 
1370 	PMD_INIT_FUNC_TRACE();
1371 
1372 	dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1373 	dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1374 	dev_info->max_rx_queues      = hw->mac.max_queues;
1375 	dev_info->max_tx_queues      = hw->mac.max_queues;
1376 	dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1377 	dev_info->max_hash_mac_addrs = 0;
1378 	dev_info->max_vfs            = pdev->max_vfs;
1379 	dev_info->vmdq_pool_base     = 0;
1380 	dev_info->vmdq_queue_base    = 0;
1381 	dev_info->max_vmdq_pools     = ETH_32_POOLS;
1382 	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1383 	dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
1384 	dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
1385 				    dev_info->rx_queue_offload_capa;
1386 	dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
1387 	dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
1388 				    dev_info->tx_queue_offload_capa;
1389 
1390 	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1391 	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1392 	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1393 					ETH_RSS_IPV6 |
1394 					ETH_RSS_IPV6_EX |
1395 					ETH_RSS_NONFRAG_IPV4_TCP |
1396 					ETH_RSS_NONFRAG_IPV6_TCP |
1397 					ETH_RSS_IPV6_TCP_EX |
1398 					ETH_RSS_NONFRAG_IPV4_UDP |
1399 					ETH_RSS_NONFRAG_IPV6_UDP |
1400 					ETH_RSS_IPV6_UDP_EX;
1401 
1402 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1403 		.rx_thresh = {
1404 			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
1405 			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
1406 			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
1407 		},
1408 		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1409 		.rx_drop_en = 0,
1410 		.offloads = 0,
1411 	};
1412 
1413 	dev_info->default_txconf = (struct rte_eth_txconf) {
1414 		.tx_thresh = {
1415 			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
1416 			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
1417 			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
1418 		},
1419 		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1420 		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1421 		.offloads = 0,
1422 	};
1423 
1424 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1425 		.nb_max = FM10K_MAX_RX_DESC,
1426 		.nb_min = FM10K_MIN_RX_DESC,
1427 		.nb_align = FM10K_MULT_RX_DESC,
1428 	};
1429 
1430 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1431 		.nb_max = FM10K_MAX_TX_DESC,
1432 		.nb_min = FM10K_MIN_TX_DESC,
1433 		.nb_align = FM10K_MULT_TX_DESC,
1434 		.nb_seg_max = FM10K_TX_MAX_SEG,
1435 		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1436 	};
1437 
1438 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1439 			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1440 			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1441 
1442 	return 0;
1443 }
1444 
1445 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1446 static const uint32_t *
1447 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1448 {
1449 	if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1450 	    dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1451 		static uint32_t ptypes[] = {
1452 			/* refers to rx_desc_to_ol_flags() */
1453 			RTE_PTYPE_L2_ETHER,
1454 			RTE_PTYPE_L3_IPV4,
1455 			RTE_PTYPE_L3_IPV4_EXT,
1456 			RTE_PTYPE_L3_IPV6,
1457 			RTE_PTYPE_L3_IPV6_EXT,
1458 			RTE_PTYPE_L4_TCP,
1459 			RTE_PTYPE_L4_UDP,
1460 			RTE_PTYPE_UNKNOWN
1461 		};
1462 
1463 		return ptypes;
1464 	} else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1465 		   dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1466 		static uint32_t ptypes_vec[] = {
1467 			/* refers to fm10k_desc_to_pktype_v() */
1468 			RTE_PTYPE_L3_IPV4,
1469 			RTE_PTYPE_L3_IPV4_EXT,
1470 			RTE_PTYPE_L3_IPV6,
1471 			RTE_PTYPE_L3_IPV6_EXT,
1472 			RTE_PTYPE_L4_TCP,
1473 			RTE_PTYPE_L4_UDP,
1474 			RTE_PTYPE_TUNNEL_GENEVE,
1475 			RTE_PTYPE_TUNNEL_NVGRE,
1476 			RTE_PTYPE_TUNNEL_VXLAN,
1477 			RTE_PTYPE_TUNNEL_GRE,
1478 			RTE_PTYPE_UNKNOWN
1479 		};
1480 
1481 		return ptypes_vec;
1482 	}
1483 
1484 	return NULL;
1485 }
1486 #else
1487 static const uint32_t *
1488 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1489 {
1490 	return NULL;
1491 }
1492 #endif
1493 
1494 static int
1495 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1496 {
1497 	s32 result;
1498 	uint16_t mac_num = 0;
1499 	uint32_t vid_idx, vid_bit, mac_index;
1500 	struct fm10k_hw *hw;
1501 	struct fm10k_macvlan_filter_info *macvlan;
1502 	struct rte_eth_dev_data *data = dev->data;
1503 
1504 	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1505 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1506 
1507 	if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1508 		PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1509 		return -EINVAL;
1510 	}
1511 
1512 	if (vlan_id > ETH_VLAN_ID_MAX) {
1513 		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1514 		return -EINVAL;
1515 	}
1516 
1517 	vid_idx = FM10K_VFTA_IDX(vlan_id);
1518 	vid_bit = FM10K_VFTA_BIT(vlan_id);
1519 	/* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1520 	if (on && (macvlan->vfta[vid_idx] & vid_bit))
1521 		return 0;
1522 	/* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1523 	if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1524 		PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
1525 			"in the VLAN filter table");
1526 		return -EINVAL;
1527 	}
1528 
1529 	fm10k_mbx_lock(hw);
1530 	result = fm10k_update_vlan(hw, vlan_id, 0, on);
1531 	fm10k_mbx_unlock(hw);
1532 	if (result != FM10K_SUCCESS) {
1533 		PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1534 		return -EIO;
1535 	}
1536 
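	/* Propagate the VLAN change to every unicast MAC address currently
	 * installed on the port.
	 */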
1537 	for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1538 			(result == FM10K_SUCCESS); mac_index++) {
1539 		if (rte_is_zero_ether_addr(&data->mac_addrs[mac_index]))
1540 			continue;
1541 		if (mac_num > macvlan->mac_num - 1) {
1542 			PMD_INIT_LOG(ERR, "MAC address number "
1543 					"not match");
1544 			break;
1545 		}
1546 		fm10k_mbx_lock(hw);
1547 		result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1548 			data->mac_addrs[mac_index].addr_bytes,
1549 			vlan_id, on, 0);
1550 		fm10k_mbx_unlock(hw);
1551 		mac_num++;
1552 	}
1553 	if (result != FM10K_SUCCESS) {
1554 		PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1555 		return -EIO;
1556 	}
1557 
1558 	if (on) {
1559 		macvlan->vlan_num++;
1560 		macvlan->vfta[vid_idx] |= vid_bit;
1561 	} else {
1562 		macvlan->vlan_num--;
1563 		macvlan->vfta[vid_idx] &= ~vid_bit;
1564 	}
1565 	return 0;
1566 }
1567 
1568 static int
1569 fm10k_vlan_offload_set(struct rte_eth_dev *dev __rte_unused,
1570 		       int mask __rte_unused)
1571 {
1572 	return 0;
1573 }
1574 
1575 /* Add/Remove a MAC address, and update filters to main VSI */
1576 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1577 		const u8 *mac, bool add, uint32_t pool)
1578 {
1579 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1580 	struct fm10k_macvlan_filter_info *macvlan;
1581 	uint32_t i, j, k;
1582 
1583 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1584 
1585 	if (pool != MAIN_VSI_POOL_NUMBER) {
1586 		PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1587 			"mac to pool %u", pool);
1588 		return;
1589 	}
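	/* Walk the VFTA bitmap and add/remove this MAC on every VLAN that is
	 * currently enabled for the port.
	 */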
1590 	for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1591 		if (!macvlan->vfta[j])
1592 			continue;
1593 		for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1594 			if (!(macvlan->vfta[j] & (1 << k)))
1595 				continue;
1596 			if (i + 1 > macvlan->vlan_num) {
1597 				PMD_INIT_LOG(ERR, "vlan number not match");
1598 				return;
1599 			}
1600 			fm10k_mbx_lock(hw);
1601 			fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1602 				j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1603 			fm10k_mbx_unlock(hw);
1604 			i++;
1605 		}
1606 	}
1607 }
1608 
1609 /* Add/Remove a MAC address, and update filters to VMDQ */
1610 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1611 		const u8 *mac, bool add, uint32_t pool)
1612 {
1613 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1614 	struct fm10k_macvlan_filter_info *macvlan;
1615 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
1616 	uint32_t i;
1617 
1618 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1619 	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1620 
1621 	if (pool > macvlan->nb_queue_pools) {
1622 		PMD_DRV_LOG(ERR, "Pool number %u invalid."
1623 			" Max pool is %u",
1624 			pool, macvlan->nb_queue_pools);
1625 		return;
1626 	}
1627 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1628 		if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1629 			continue;
1630 		fm10k_mbx_lock(hw);
1631 		fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1632 			vmdq_conf->pool_map[i].vlan_id, add, 0);
1633 		fm10k_mbx_unlock(hw);
1634 	}
1635 }
1636 
1637 /* Add/Remove a MAC address, and update filters */
1638 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1639 		const u8 *mac, bool add, uint32_t pool)
1640 {
1641 	struct fm10k_macvlan_filter_info *macvlan;
1642 
1643 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1644 
1645 	if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1646 		fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1647 	else
1648 		fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1649 
1650 	if (add)
1651 		macvlan->mac_num++;
1652 	else
1653 		macvlan->mac_num--;
1654 }
1655 
1656 /* Add a MAC address, and update filters */
1657 static int
1658 fm10k_macaddr_add(struct rte_eth_dev *dev,
1659 		struct rte_ether_addr *mac_addr,
1660 		uint32_t index,
1661 		uint32_t pool)
1662 {
1663 	struct fm10k_macvlan_filter_info *macvlan;
1664 
1665 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1666 	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1667 	macvlan->mac_vmdq_id[index] = pool;
1668 	return 0;
1669 }
1670 
1671 /* Remove a MAC address, and update filters */
1672 static void
1673 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1674 {
1675 	struct rte_eth_dev_data *data = dev->data;
1676 	struct fm10k_macvlan_filter_info *macvlan;
1677 
1678 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1679 	fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1680 			FALSE, macvlan->mac_vmdq_id[index]);
1681 	macvlan->mac_vmdq_id[index] = 0;
1682 }
1683 
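/* Check that a requested descriptor count lies within [min, max] and is a multiple of mult */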
1684 static inline int
1685 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1686 {
1687 	if ((request < min) || (request > max) || ((request % mult) != 0))
1688 		return -1;
1689 	else
1690 		return 0;
1691 }
1692 
1693 
1694 static inline int
1695 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1696 {
1697 	if ((request < min) || (request > max) || ((div % request) != 0))
1698 		return -1;
1699 	else
1700 		return 0;
1701 }
1702 
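/* Apply Rx queue thresholds from rte_eth_rxconf (or defaults) after validating them */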
1703 static inline int
1704 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1705 {
1706 	uint16_t rx_free_thresh;
1707 
1708 	if (conf->rx_free_thresh == 0)
1709 		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1710 	else
1711 		rx_free_thresh = conf->rx_free_thresh;
1712 
1713 	/* make sure the requested threshold satisfies the constraints */
1714 	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1715 			FM10K_RX_FREE_THRESH_MAX(q),
1716 			FM10K_RX_FREE_THRESH_DIV(q),
1717 			rx_free_thresh)) {
1718 		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1719 			"less than or equal to %u, "
1720 			"greater than or equal to %u, "
1721 			"and a divisor of %u",
1722 			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1723 			FM10K_RX_FREE_THRESH_MIN(q),
1724 			FM10K_RX_FREE_THRESH_DIV(q));
1725 		return -EINVAL;
1726 	}
1727 
1728 	q->alloc_thresh = rx_free_thresh;
1729 	q->drop_en = conf->rx_drop_en;
1730 	q->rx_deferred_start = conf->rx_deferred_start;
1731 
1732 	return 0;
1733 }
1734 
1735 /*
1736  * Hardware requires specific alignment for Rx packet buffers. At
1737  * least one of the following two conditions must be satisfied.
1738  *  1. Address is 512B aligned
1739  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1740  *
1741  * As such, the driver may need to adjust the DMA address within the
1742  * buffer by up to 512B.
1743  *
1744  * return 1 if the element size is valid, otherwise return 0.
1745  */
1746 static int
1747 mempool_element_size_valid(struct rte_mempool *mp)
1748 {
1749 	uint32_t min_size;
1750 
1751 	/* elt_size includes mbuf header and headroom */
1752 	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1753 			RTE_PKTMBUF_HEADROOM;
1754 
1755 	/* account for up to 512B of alignment */
1756 	min_size -= FM10K_RX_DATABUF_ALIGN;
1757 
1758 	/* sanity check for overflow */
1759 	if (min_size > mp->elt_size)
1760 		return 0;
1761 
1762 	/* size is valid */
1763 	return 1;
1764 }
1765 
1766 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1767 {
1768 	RTE_SET_USED(dev);
1769 
1770 	return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
1771 }
1772 
1773 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1774 {
1775 	RTE_SET_USED(dev);
1776 
1777 	return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
1778 			   DEV_RX_OFFLOAD_VLAN_FILTER |
1779 			   DEV_RX_OFFLOAD_IPV4_CKSUM  |
1780 			   DEV_RX_OFFLOAD_UDP_CKSUM   |
1781 			   DEV_RX_OFFLOAD_TCP_CKSUM   |
1782 			   DEV_RX_OFFLOAD_JUMBO_FRAME |
1783 			   DEV_RX_OFFLOAD_HEADER_SPLIT |
1784 			   DEV_RX_OFFLOAD_RSS_HASH);
1785 }
1786 
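/* Set up an Rx queue: validate parameters, then allocate the queue structure,
 * software ring and hardware descriptor ring.
 */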
1787 static int
1788 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1789 	uint16_t nb_desc, unsigned int socket_id,
1790 	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1791 {
1792 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1793 	struct fm10k_dev_info *dev_info =
1794 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1795 	struct fm10k_rx_queue *q;
1796 	const struct rte_memzone *mz;
1797 	uint64_t offloads;
1798 
1799 	PMD_INIT_FUNC_TRACE();
1800 
1801 	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1802 
1803 	/* make sure the mempool element size can account for alignment. */
1804 	if (!mempool_element_size_valid(mp)) {
1805 		PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1806 		return -EINVAL;
1807 	}
1808 
1809 	/* make sure a valid number of descriptors have been requested */
1810 	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1811 				FM10K_MULT_RX_DESC, nb_desc)) {
1812 		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1813 			"less than or equal to %"PRIu32", "
1814 			"greater than or equal to %u, "
1815 			"and a multiple of %u",
1816 			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1817 			FM10K_MULT_RX_DESC);
1818 		return -EINVAL;
1819 	}
1820 
1821 	/*
1822 	 * if this queue existed already, free the associated memory. The
1823 	 * queue cannot be reused in case we need to allocate memory on a
1824 	 * different socket than was previously used.
1825 	 */
1826 	if (dev->data->rx_queues[queue_id] != NULL) {
1827 		rx_queue_free(dev->data->rx_queues[queue_id]);
1828 		dev->data->rx_queues[queue_id] = NULL;
1829 	}
1830 
1831 	/* allocate memory for the queue structure */
1832 	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1833 				socket_id);
1834 	if (q == NULL) {
1835 		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1836 		return -ENOMEM;
1837 	}
1838 
1839 	/* setup queue */
1840 	q->mp = mp;
1841 	q->nb_desc = nb_desc;
1842 	q->nb_fake_desc = FM10K_MULT_RX_DESC;
1843 	q->port_id = dev->data->port_id;
1844 	q->queue_id = queue_id;
1845 	q->tail_ptr = (volatile uint32_t *)
1846 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1847 	q->offloads = offloads;
1848 	if (handle_rxconf(q, conf)) {
1849 		rte_free(q);
1850 		return -EINVAL;
1851 	}
1852 	/* allocate memory for the software ring */
1853 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1854 			(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1855 			RTE_CACHE_LINE_SIZE, socket_id);
1856 	if (q->sw_ring == NULL) {
1857 		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1858 		rte_free(q);
1859 		return -ENOMEM;
1860 	}
1861 
1862 	/*
1863 	 * allocate memory for the hardware descriptor ring. A memzone large
1864 	 * enough to hold the maximum ring size is requested to allow for
1865 	 * resizing in later calls to the queue setup function.
1866 	 */
1867 	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1868 				      FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1869 				      socket_id);
1870 	if (mz == NULL) {
1871 		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1872 		rte_free(q->sw_ring);
1873 		rte_free(q);
1874 		return -ENOMEM;
1875 	}
1876 	q->hw_ring = mz->addr;
1877 	q->hw_ring_phys_addr = mz->iova;
1878 
1879 	/* Check if the number of descriptors satisfies the Vector Rx requirement */
1880 	if (!rte_is_power_of_2(nb_desc)) {
1881 		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1882 				    "preconditions - canceling the feature for "
1883 				    "the whole port[%d]",
1884 			     q->queue_id, q->port_id);
1885 		dev_info->rx_vec_allowed = false;
1886 	} else
1887 		fm10k_rxq_vec_setup(q);
1888 
1889 	dev->data->rx_queues[queue_id] = q;
1890 	return 0;
1891 }
1892 
1893 static void
1894 fm10k_rx_queue_release(void *queue)
1895 {
1896 	PMD_INIT_FUNC_TRACE();
1897 
1898 	rx_queue_free(queue);
1899 }
1900 
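/* Apply Tx free/RS thresholds from rte_eth_txconf (or defaults) after validating them */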
1901 static inline int
1902 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1903 {
1904 	uint16_t tx_free_thresh;
1905 	uint16_t tx_rs_thresh;
1906 
1907 	/* The constraint macros require that tx_free_thresh is configured
1908 	 * before tx_rs_thresh */
1909 	if (conf->tx_free_thresh == 0)
1910 		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1911 	else
1912 		tx_free_thresh = conf->tx_free_thresh;
1913 
1914 	/* make sure the requested threshold satisfies the constraints */
1915 	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1916 			FM10K_TX_FREE_THRESH_MAX(q),
1917 			FM10K_TX_FREE_THRESH_DIV(q),
1918 			tx_free_thresh)) {
1919 		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1920 			"less than or equal to %u, "
1921 			"greater than or equal to %u, "
1922 			"and a divisor of %u",
1923 			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1924 			FM10K_TX_FREE_THRESH_MIN(q),
1925 			FM10K_TX_FREE_THRESH_DIV(q));
1926 		return -EINVAL;
1927 	}
1928 
1929 	q->free_thresh = tx_free_thresh;
1930 
1931 	if (conf->tx_rs_thresh == 0)
1932 		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1933 	else
1934 		tx_rs_thresh = conf->tx_rs_thresh;
1935 
1936 	q->tx_deferred_start = conf->tx_deferred_start;
1937 
1938 	/* make sure the requested threshold satisfies the constraints */
1939 	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1940 			FM10K_TX_RS_THRESH_MAX(q),
1941 			FM10K_TX_RS_THRESH_DIV(q),
1942 			tx_rs_thresh)) {
1943 		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1944 			"less than or equal to %u, "
1945 			"greater than or equal to %u, "
1946 			"and a divisor of %u",
1947 			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1948 			FM10K_TX_RS_THRESH_MIN(q),
1949 			FM10K_TX_RS_THRESH_DIV(q));
1950 		return -EINVAL;
1951 	}
1952 
1953 	q->rs_thresh = tx_rs_thresh;
1954 
1955 	return 0;
1956 }
1957 
1958 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1959 {
1960 	RTE_SET_USED(dev);
1961 
1962 	return 0;
1963 }
1964 
1965 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1966 {
1967 	RTE_SET_USED(dev);
1968 
1969 	return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
1970 			  DEV_TX_OFFLOAD_MULTI_SEGS  |
1971 			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
1972 			  DEV_TX_OFFLOAD_UDP_CKSUM   |
1973 			  DEV_TX_OFFLOAD_TCP_CKSUM   |
1974 			  DEV_TX_OFFLOAD_TCP_TSO);
1975 }
1976 
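/* Set up a Tx queue: validate parameters, then allocate the queue structure,
 * software ring, hardware descriptor ring and RS bit tracker.
 */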
1977 static int
1978 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1979 	uint16_t nb_desc, unsigned int socket_id,
1980 	const struct rte_eth_txconf *conf)
1981 {
1982 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1983 	struct fm10k_tx_queue *q;
1984 	const struct rte_memzone *mz;
1985 	uint64_t offloads;
1986 
1987 	PMD_INIT_FUNC_TRACE();
1988 
1989 	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
1990 
1991 	/* make sure a valid number of descriptors have been requested */
1992 	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1993 				FM10K_MULT_TX_DESC, nb_desc)) {
1994 		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1995 			"less than or equal to %"PRIu32", "
1996 			"greater than or equal to %u, "
1997 			"and a multiple of %u",
1998 			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1999 			FM10K_MULT_TX_DESC);
2000 		return -EINVAL;
2001 	}
2002 
2003 	/*
2004 	 * if this queue existed already, free the associated memory. The
2005 	 * queue cannot be reused in case we need to allocate memory on a
2006 	 * different socket than was previously used.
2007 	 */
2008 	if (dev->data->tx_queues[queue_id] != NULL) {
2009 		struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2010 
2011 		tx_queue_free(txq);
2012 		dev->data->tx_queues[queue_id] = NULL;
2013 	}
2014 
2015 	/* allocate memory for the queue structure */
2016 	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2017 				socket_id);
2018 	if (q == NULL) {
2019 		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2020 		return -ENOMEM;
2021 	}
2022 
2023 	/* setup queue */
2024 	q->nb_desc = nb_desc;
2025 	q->port_id = dev->data->port_id;
2026 	q->queue_id = queue_id;
2027 	q->offloads = offloads;
2028 	q->ops = &def_txq_ops;
2029 	q->tail_ptr = (volatile uint32_t *)
2030 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2031 	if (handle_txconf(q, conf)) {
2032 		rte_free(q);
2033 		return -EINVAL;
2034 	}
2035 
2036 	/* allocate memory for the software ring */
2037 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2038 					nb_desc * sizeof(struct rte_mbuf *),
2039 					RTE_CACHE_LINE_SIZE, socket_id);
2040 	if (q->sw_ring == NULL) {
2041 		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2042 		rte_free(q);
2043 		return -ENOMEM;
2044 	}
2045 
2046 	/*
2047 	 * allocate memory for the hardware descriptor ring. A memzone large
2048 	 * enough to hold the maximum ring size is requested to allow for
2049 	 * resizing in later calls to the queue setup function.
2050 	 */
2051 	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2052 				      FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2053 				      socket_id);
2054 	if (mz == NULL) {
2055 		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2056 		rte_free(q->sw_ring);
2057 		rte_free(q);
2058 		return -ENOMEM;
2059 	}
2060 	q->hw_ring = mz->addr;
2061 	q->hw_ring_phys_addr = mz->iova;
2062 
2063 	/*
2064 	 * allocate memory for the RS bit tracker. Enough slots are required
2065 	 * to hold the descriptor index for each RS bit that needs to be set.
2066 	 */
2067 	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2068 				((nb_desc + 1) / q->rs_thresh) *
2069 				sizeof(uint16_t),
2070 				RTE_CACHE_LINE_SIZE, socket_id);
2071 	if (q->rs_tracker.list == NULL) {
2072 		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2073 		rte_free(q->sw_ring);
2074 		rte_free(q);
2075 		return -ENOMEM;
2076 	}
2077 
2078 	dev->data->tx_queues[queue_id] = q;
2079 	return 0;
2080 }
2081 
2082 static void
2083 fm10k_tx_queue_release(void *queue)
2084 {
2085 	struct fm10k_tx_queue *q = queue;
2086 	PMD_INIT_FUNC_TRACE();
2087 
2088 	tx_queue_free(q);
2089 }
2090 
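/* Write the RSS redirection table: 128 one-byte entries packed four per 32-bit register */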
2091 static int
2092 fm10k_reta_update(struct rte_eth_dev *dev,
2093 			struct rte_eth_rss_reta_entry64 *reta_conf,
2094 			uint16_t reta_size)
2095 {
2096 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2097 	uint16_t i, j, idx, shift;
2098 	uint8_t mask;
2099 	uint32_t reta;
2100 
2101 	PMD_INIT_FUNC_TRACE();
2102 
2103 	if (reta_size > FM10K_MAX_RSS_INDICES) {
2104 		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2105 			"(%d) doesn't match the number hardware can supported "
2106 			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2107 		return -EINVAL;
2108 	}
2109 
2110 	/*
2111 	 * Update Redirection Table RETA[n], n=0..31. The redirection table has
2112 	 * 128 entries in 32 registers
2113 	 */
2114 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2115 		idx = i / RTE_RETA_GROUP_SIZE;
2116 		shift = i % RTE_RETA_GROUP_SIZE;
2117 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2118 				BIT_MASK_PER_UINT32);
2119 		if (mask == 0)
2120 			continue;
2121 
2122 		reta = 0;
2123 		if (mask != BIT_MASK_PER_UINT32)
2124 			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2125 
2126 		for (j = 0; j < CHARS_PER_UINT32; j++) {
2127 			if (mask & (0x1 << j)) {
2128 				if (mask != 0xF)
2129 					reta &= ~(UINT8_MAX << CHAR_BIT * j);
2130 				reta |= reta_conf[idx].reta[shift + j] <<
2131 						(CHAR_BIT * j);
2132 			}
2133 		}
2134 		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2135 	}
2136 
2137 	return 0;
2138 }
2139 
2140 static int
2141 fm10k_reta_query(struct rte_eth_dev *dev,
2142 			struct rte_eth_rss_reta_entry64 *reta_conf,
2143 			uint16_t reta_size)
2144 {
2145 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2146 	uint16_t i, j, idx, shift;
2147 	uint8_t mask;
2148 	uint32_t reta;
2149 
2150 	PMD_INIT_FUNC_TRACE();
2151 
2152 	if (reta_size < FM10K_MAX_RSS_INDICES) {
2153 		PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2154 			"(%d) doesn't match the number hardware can supported "
2155 			"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2156 		return -EINVAL;
2157 	}
2158 
2159 	/*
2160 	 * Read Redirection Table RETA[n], n=0..31. The redirection table has
2161 	 * 128 entries in 32 registers
2162 	 */
2163 	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2164 		idx = i / RTE_RETA_GROUP_SIZE;
2165 		shift = i % RTE_RETA_GROUP_SIZE;
2166 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2167 				BIT_MASK_PER_UINT32);
2168 		if (mask == 0)
2169 			continue;
2170 
2171 		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2172 		for (j = 0; j < CHARS_PER_UINT32; j++) {
2173 			if (mask & (0x1 << j))
2174 				reta_conf[idx].reta[shift + j] = ((reta >>
2175 					CHAR_BIT * j) & UINT8_MAX);
2176 		}
2177 	}
2178 
2179 	return 0;
2180 }
2181 
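/* Program the RSS key registers and the set of enabled hash functions (MRQC) */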
2182 static int
2183 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2184 	struct rte_eth_rss_conf *rss_conf)
2185 {
2186 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2187 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
2188 	uint32_t mrqc;
2189 	uint64_t hf = rss_conf->rss_hf;
2190 	int i;
2191 
2192 	PMD_INIT_FUNC_TRACE();
2193 
2194 	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2195 				FM10K_RSSRK_ENTRIES_PER_REG))
2196 		return -EINVAL;
2197 
2198 	if (hf == 0)
2199 		return -EINVAL;
2200 
2201 	mrqc = 0;
2202 	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2203 	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2204 	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2205 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2206 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2207 	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2208 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2209 	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2210 	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2211 
2212 	/* If the mapping doesn't match any supported hash type, return */
2213 	if (mrqc == 0)
2214 		return -EINVAL;
2215 
2216 	if (key != NULL)
2217 		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2218 			FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2219 
2220 	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2221 
2222 	return 0;
2223 }
2224 
2225 static int
2226 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2227 	struct rte_eth_rss_conf *rss_conf)
2228 {
2229 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2230 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
2231 	uint32_t mrqc;
2232 	uint64_t hf;
2233 	int i;
2234 
2235 	PMD_INIT_FUNC_TRACE();
2236 
2237 	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2238 				FM10K_RSSRK_ENTRIES_PER_REG))
2239 		return -EINVAL;
2240 
2241 	if (key != NULL)
2242 		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2243 			key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2244 
2245 	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2246 	hf = 0;
2247 	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2248 	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2249 	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2250 	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2251 	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2252 	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2253 	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2254 	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2255 	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2256 
2257 	rss_conf->rss_hf = hf;
2258 
2259 	return 0;
2260 }
2261 
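/* Map all PF non-queue interrupt causes to the misc vector, enable them in EIMR and unmask ITR 0 */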
2262 static void
2263 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2264 {
2265 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2266 	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2267 
2268 	/* Bind all local non-queue interrupt to vector 0 */
2269 	int_map |= FM10K_MISC_VEC_ID;
2270 
2271 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2272 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2273 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2274 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2275 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2276 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2277 
2278 	/* Enable misc causes */
2279 	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2280 				FM10K_EIMR_ENABLE(THI_FAULT) |
2281 				FM10K_EIMR_ENABLE(FUM_FAULT) |
2282 				FM10K_EIMR_ENABLE(MAILBOX) |
2283 				FM10K_EIMR_ENABLE(SWITCHREADY) |
2284 				FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2285 				FM10K_EIMR_ENABLE(SRAMERROR) |
2286 				FM10K_EIMR_ENABLE(VFLR));
2287 
2288 	/* Enable ITR 0 */
2289 	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2290 					FM10K_ITR_MASK_CLEAR);
2291 	FM10K_WRITE_FLUSH(hw);
2292 }
2293 
2294 static void
2295 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2296 {
2297 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2298 	uint32_t int_map = FM10K_INT_MAP_DISABLE;
2299 
2300 	int_map |= FM10K_MISC_VEC_ID;
2301 
2302 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2303 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2304 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2305 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2306 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2307 	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2308 
2309 	/* Disable misc causes */
2310 	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2311 				FM10K_EIMR_DISABLE(THI_FAULT) |
2312 				FM10K_EIMR_DISABLE(FUM_FAULT) |
2313 				FM10K_EIMR_DISABLE(MAILBOX) |
2314 				FM10K_EIMR_DISABLE(SWITCHREADY) |
2315 				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2316 				FM10K_EIMR_DISABLE(SRAMERROR) |
2317 				FM10K_EIMR_DISABLE(VFLR));
2318 
2319 	/* Disable ITR 0 */
2320 	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2321 	FM10K_WRITE_FLUSH(hw);
2322 }
2323 
2324 static void
2325 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2326 {
2327 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2328 	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2329 
2330 	/* Bind all local non-queue interrupt to vector 0 */
2331 	int_map |= FM10K_MISC_VEC_ID;
2332 
2333 	/* Only INT 0 is available; the other 15 are reserved. */
2334 	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2335 
2336 	/* Enable ITR 0 */
2337 	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2338 					FM10K_ITR_MASK_CLEAR);
2339 	FM10K_WRITE_FLUSH(hw);
2340 }
2341 
2342 static void
2343 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2344 {
2345 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2346 	uint32_t int_map = FM10K_INT_MAP_DISABLE;
2347 
2348 	int_map |= FM10K_MISC_VEC_ID;
2349 
2350 	/* Only INT 0 is available; the other 15 are reserved. */
2351 	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2352 
2353 	/* Disable ITR 0 */
2354 	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2355 	FM10K_WRITE_FLUSH(hw);
2356 }
2357 
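/* Unmask the interrupt throttle register bound to the given Rx queue and ack the interrupt */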
2358 static int
2359 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2360 {
2361 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2362 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2363 
2364 	/* Enable ITR */
2365 	if (hw->mac.type == fm10k_mac_pf)
2366 		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2367 			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2368 	else
2369 		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2370 			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2371 	rte_intr_ack(&pdev->intr_handle);
2372 	return 0;
2373 }
2374 
2375 static int
2376 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2377 {
2378 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2379 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2380 
2381 	/* Disable ITR */
2382 	if (hw->mac.type == fm10k_mac_pf)
2383 		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2384 			FM10K_ITR_MASK_SET);
2385 	else
2386 		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2387 			FM10K_ITR_MASK_SET);
2388 	return 0;
2389 }
2390 
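/* Set up per-queue Rx interrupts: allocate event fds and the queue-to-vector
 * map, then re-enable device and host interrupts.
 */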
2391 static int
2392 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2393 {
2394 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2395 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2396 	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2397 	uint32_t intr_vector, vec;
2398 	uint16_t queue_id;
2399 	int result = 0;
2400 
2401 	/* fm10k needs a separate interrupt for the mailbox,
2402 	 * so only drivers that support multiple interrupt vectors,
2403 	 * e.g. vfio-pci, can work in fm10k interrupt mode
2404 	 */
2405 	if (!rte_intr_cap_multiple(intr_handle) ||
2406 			dev->data->dev_conf.intr_conf.rxq == 0)
2407 		return result;
2408 
2409 	intr_vector = dev->data->nb_rx_queues;
2410 
2411 	/* disable interrupt first */
2412 	rte_intr_disable(intr_handle);
2413 	if (hw->mac.type == fm10k_mac_pf)
2414 		fm10k_dev_disable_intr_pf(dev);
2415 	else
2416 		fm10k_dev_disable_intr_vf(dev);
2417 
2418 	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2419 		PMD_INIT_LOG(ERR, "Failed to init event fd");
2420 		result = -EIO;
2421 	}
2422 
2423 	if (rte_intr_dp_is_en(intr_handle) && !result) {
2424 		intr_handle->intr_vec =	rte_zmalloc("intr_vec",
2425 			dev->data->nb_rx_queues * sizeof(int), 0);
2426 		if (intr_handle->intr_vec) {
2427 			for (queue_id = 0, vec = FM10K_RX_VEC_START;
2428 					queue_id < dev->data->nb_rx_queues;
2429 					queue_id++) {
2430 				intr_handle->intr_vec[queue_id] = vec;
2431 				if (vec < intr_handle->nb_efd - 1
2432 						+ FM10K_RX_VEC_START)
2433 					vec++;
2434 			}
2435 		} else {
2436 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2437 				" intr_vec", dev->data->nb_rx_queues);
2438 			rte_intr_efd_disable(intr_handle);
2439 			result = -ENOMEM;
2440 		}
2441 	}
2442 
2443 	if (hw->mac.type == fm10k_mac_pf)
2444 		fm10k_dev_enable_intr_pf(dev);
2445 	else
2446 		fm10k_dev_enable_intr_vf(dev);
2447 	rte_intr_enable(intr_handle);
2448 	hw->mac.ops.update_int_moderator(hw);
2449 	return result;
2450 }
2451 
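/* Decode and log any PCA/THI/FUM fault reported in the interrupt cause register */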
2452 static int
2453 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2454 {
2455 	struct fm10k_fault fault;
2456 	int err;
2457 	const char *estr = "Unknown error";
2458 
2459 	/* Process PCA fault */
2460 	if (eicr & FM10K_EICR_PCA_FAULT) {
2461 		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2462 		if (err)
2463 			goto error;
2464 		switch (fault.type) {
2465 		case PCA_NO_FAULT:
2466 			estr = "PCA_NO_FAULT"; break;
2467 		case PCA_UNMAPPED_ADDR:
2468 			estr = "PCA_UNMAPPED_ADDR"; break;
2469 		case PCA_BAD_QACCESS_PF:
2470 			estr = "PCA_BAD_QACCESS_PF"; break;
2471 		case PCA_BAD_QACCESS_VF:
2472 			estr = "PCA_BAD_QACCESS_VF"; break;
2473 		case PCA_MALICIOUS_REQ:
2474 			estr = "PCA_MALICIOUS_REQ"; break;
2475 		case PCA_POISONED_TLP:
2476 			estr = "PCA_POISONED_TLP"; break;
2477 		case PCA_TLP_ABORT:
2478 			estr = "PCA_TLP_ABORT"; break;
2479 		default:
2480 			goto error;
2481 		}
2482 		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2483 			estr, fault.func ? "VF" : "PF", fault.func,
2484 			fault.address, fault.specinfo);
2485 	}
2486 
2487 	/* Process THI fault */
2488 	if (eicr & FM10K_EICR_THI_FAULT) {
2489 		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2490 		if (err)
2491 			goto error;
2492 		switch (fault.type) {
2493 		case THI_NO_FAULT:
2494 			estr = "THI_NO_FAULT"; break;
2495 		case THI_MAL_DIS_Q_FAULT:
2496 			estr = "THI_MAL_DIS_Q_FAULT"; break;
2497 		default:
2498 			goto error;
2499 		}
2500 		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2501 			estr, fault.func ? "VF" : "PF", fault.func,
2502 			fault.address, fault.specinfo);
2503 	}
2504 
2505 	/* Process FUM fault */
2506 	if (eicr & FM10K_EICR_FUM_FAULT) {
2507 		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2508 		if (err)
2509 			goto error;
2510 		switch (fault.type) {
2511 		case FUM_NO_FAULT:
2512 			estr = "FUM_NO_FAULT"; break;
2513 		case FUM_UNMAPPED_ADDR:
2514 			estr = "FUM_UNMAPPED_ADDR"; break;
2515 		case FUM_POISONED_TLP:
2516 			estr = "FUM_POISONED_TLP"; break;
2517 		case FUM_BAD_VF_QACCESS:
2518 			estr = "FUM_BAD_VF_QACCESS"; break;
2519 		case FUM_ADD_DECODE_ERR:
2520 			estr = "FUM_ADD_DECODE_ERR"; break;
2521 		case FUM_RO_ERROR:
2522 			estr = "FUM_RO_ERROR"; break;
2523 		case FUM_QPRC_CRC_ERROR:
2524 			estr = "FUM_QPRC_CRC_ERROR"; break;
2525 		case FUM_CSR_TIMEOUT:
2526 			estr = "FUM_CSR_TIMEOUT"; break;
2527 		case FUM_INVALID_TYPE:
2528 			estr = "FUM_INVALID_TYPE"; break;
2529 		case FUM_INVALID_LENGTH:
2530 			estr = "FUM_INVALID_LENGTH"; break;
2531 		case FUM_INVALID_BE:
2532 			estr = "FUM_INVALID_BE"; break;
2533 		case FUM_INVALID_ALIGN:
2534 			estr = "FUM_INVALID_ALIGN"; break;
2535 		default:
2536 			goto error;
2537 		}
2538 		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2539 			estr, fault.func ? "VF" : "PF", fault.func,
2540 			fault.address, fault.specinfo);
2541 	}
2542 
2543 	return 0;
2544 error:
2545 	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2546 	return err;
2547 }
2548 
2549 /**
2550  * PF interrupt handler triggered by NIC for handling specific interrupt.
2551  *
2552  * @param handle
2553  *  Pointer to interrupt handle.
2554  * @param param
2555  *  The address of parameter (struct rte_eth_dev *) registered before.
2556  *
2557  * @return
2558  *  void
2559  */
2560 static void
2561 fm10k_dev_interrupt_handler_pf(void *param)
2562 {
2563 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2564 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2565 	uint32_t cause, status;
2566 	struct fm10k_dev_info *dev_info =
2567 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2568 	int status_mbx;
2569 	s32 err;
2570 
2571 	if (hw->mac.type != fm10k_mac_pf)
2572 		return;
2573 
2574 	cause = FM10K_READ_REG(hw, FM10K_EICR);
2575 
2576 	/* Handle PCI fault cases */
2577 	if (cause & FM10K_EICR_FAULT_MASK) {
2578 		PMD_INIT_LOG(ERR, "INT: find fault!");
2579 		fm10k_dev_handle_fault(hw, cause);
2580 	}
2581 
2582 	/* Handle switch up/down */
2583 	if (cause & FM10K_EICR_SWITCHNOTREADY)
2584 		PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2585 
2586 	if (cause & FM10K_EICR_SWITCHREADY) {
2587 		PMD_INIT_LOG(INFO, "INT: Switch is ready");
2588 		if (dev_info->sm_down == 1) {
2589 			fm10k_mbx_lock(hw);
2590 
2591 			/* For recreating logical ports */
2592 			status_mbx = hw->mac.ops.update_lport_state(hw,
2593 					hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2594 			if (status_mbx == FM10K_SUCCESS)
2595 				PMD_INIT_LOG(INFO,
2596 					"INT: Recreated Logical port");
2597 			else
2598 				PMD_INIT_LOG(INFO,
2599 					"INT: Logical ports weren't recreated");
2600 
2601 			status_mbx = hw->mac.ops.update_xcast_mode(hw,
2602 				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2603 			if (status_mbx != FM10K_SUCCESS)
2604 				PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2605 
2606 			fm10k_mbx_unlock(hw);
2607 
2608 			/* first clear the internal SW recording structure */
2609 			if (!(dev->data->dev_conf.rxmode.mq_mode &
2610 						ETH_MQ_RX_VMDQ_FLAG))
2611 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2612 					false);
2613 
2614 			fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2615 					MAIN_VSI_POOL_NUMBER);
2616 
2617 			/*
2618 			 * Add default mac address and vlan for the logical
2619 			 * ports that have been created; leave it to the
2620 			 * application to fully recover Rx filtering.
2621 			 */
2622 			fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2623 					MAIN_VSI_POOL_NUMBER);
2624 
2625 			if (!(dev->data->dev_conf.rxmode.mq_mode &
2626 						ETH_MQ_RX_VMDQ_FLAG))
2627 				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2628 					true);
2629 
2630 			dev_info->sm_down = 0;
2631 			rte_eth_dev_callback_process(dev,
2632 					RTE_ETH_EVENT_INTR_LSC,
2633 					NULL);
2634 		}
2635 	}
2636 
2637 	/* Handle mailbox message */
2638 	fm10k_mbx_lock(hw);
2639 	err = hw->mbx.ops.process(hw, &hw->mbx);
2640 	fm10k_mbx_unlock(hw);
2641 
2642 	if (err == FM10K_ERR_RESET_REQUESTED) {
2643 		PMD_INIT_LOG(INFO, "INT: Switch is down");
2644 		dev_info->sm_down = 1;
2645 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2646 	}
2647 
2648 	/* Handle SRAM error */
2649 	if (cause & FM10K_EICR_SRAMERROR) {
2650 		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2651 
2652 		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2653 		/* Write to clear pending bits */
2654 		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2655 
2656 		/* Todo: print out error message after shared code updates */
2657 	}
2658 
2659 	/* Clear these 3 events if having any */
2660 	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2661 		 FM10K_EICR_SWITCHREADY;
2662 	if (cause)
2663 		FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2664 
2665 	/* Re-enable interrupt from device side */
2666 	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2667 					FM10K_ITR_MASK_CLEAR);
2668 	/* Re-enable interrupt from host side */
2669 	rte_intr_ack(dev->intr_handle);
2670 }
2671 
2672 /**
2673  * VF interrupt handler triggered by NIC for handling specific interrupt.
2674  *
2675  * @param handle
2676  *  Pointer to interrupt handle.
2677  * @param param
2678  *  The address of parameter (struct rte_eth_dev *) registered before.
2679  *
2680  * @return
2681  *  void
2682  */
2683 static void
2684 fm10k_dev_interrupt_handler_vf(void *param)
2685 {
2686 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2687 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2688 	struct fm10k_mbx_info *mbx = &hw->mbx;
2689 	struct fm10k_dev_info *dev_info =
2690 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2691 	const enum fm10k_mbx_state state = mbx->state;
2692 	int status_mbx;
2693 
2694 	if (hw->mac.type != fm10k_mac_vf)
2695 		return;
2696 
2697 	/* Handle mailbox message if lock is acquired */
2698 	fm10k_mbx_lock(hw);
2699 	hw->mbx.ops.process(hw, &hw->mbx);
2700 	fm10k_mbx_unlock(hw);
2701 
2702 	if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2703 		PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2704 
2705 		fm10k_mbx_lock(hw);
2706 		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2707 				MAX_LPORT_NUM, 1);
2708 		fm10k_mbx_unlock(hw);
2709 
2710 		/* Setting reset flag */
2711 		dev_info->sm_down = 1;
2712 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2713 	}
2714 
2715 	if (dev_info->sm_down == 1 &&
2716 			hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2717 		PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2718 		fm10k_mbx_lock(hw);
2719 		status_mbx = hw->mac.ops.update_xcast_mode(hw,
2720 				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2721 		if (status_mbx != FM10K_SUCCESS)
2722 			PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2723 		fm10k_mbx_unlock(hw);
2724 
2725 		/* first clear the internal SW recording structure */
2726 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2727 		fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2728 				MAIN_VSI_POOL_NUMBER);
2729 
2730 		/*
2731 		 * Add default mac address and vlan for the logical ports that
2732 		 * have been created; leave it to the application to fully recover
2733 		 * Rx filtering.
2734 		 */
2735 		fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2736 				MAIN_VSI_POOL_NUMBER);
2737 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2738 
2739 		dev_info->sm_down = 0;
2740 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2741 	}
2742 
2743 	/* Re-enable interrupt from device side */
2744 	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2745 					FM10K_ITR_MASK_CLEAR);
2746 	/* Re-enable interrupt from host side */
2747 	rte_intr_ack(dev->intr_handle);
2748 }
2749 
2750 /* Mailbox message handler in VF */
2751 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2752 	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2753 	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2754 	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2755 	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2756 };
2757 
2758 static int
2759 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2760 {
2761 	int err = 0;
2762 
2763 	/* Initialize mailbox lock */
2764 	fm10k_mbx_initlock(hw);
2765 
2766 	/* Replace default message handler with new ones */
2767 	if (hw->mac.type == fm10k_mac_vf)
2768 		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2769 
2770 	if (err) {
2771 		PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2772 				err);
2773 		return err;
2774 	}
2775 	/* Connect to SM for PF device or PF for VF device */
2776 	return hw->mbx.ops.connect(hw, &hw->mbx);
2777 }
2778 
2779 static void
2780 fm10k_close_mbx_service(struct fm10k_hw *hw)
2781 {
2782 	/* Disconnect from SM for PF device or PF for VF device */
2783 	hw->mbx.ops.disconnect(hw, &hw->mbx);
2784 }
2785 
2786 static int
2787 fm10k_dev_close(struct rte_eth_dev *dev)
2788 {
2789 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2790 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2791 	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2792 	int ret;
2793 
2794 	PMD_INIT_FUNC_TRACE();
2795 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2796 		return 0;
2797 
2798 	fm10k_mbx_lock(hw);
2799 	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2800 		MAX_LPORT_NUM, false);
2801 	fm10k_mbx_unlock(hw);
2802 
2803 	/* allow 100ms for device to quiesce */
2804 	rte_delay_us(FM10K_SWITCH_QUIESCE_US);
2805 
2806 	/* Stop mailbox service first */
2807 	fm10k_close_mbx_service(hw);
2808 
2809 	ret = fm10k_dev_stop(dev);
2810 
2811 	fm10k_dev_queue_release(dev);
2812 	fm10k_stop_hw(hw);
2813 
2814 	/* disable uio/vfio intr */
2815 	rte_intr_disable(intr_handle);
2816 
2817 	/* PF and VF have different interrupt handling mechanisms */
2818 	if (hw->mac.type == fm10k_mac_pf) {
2819 		/* disable interrupt */
2820 		fm10k_dev_disable_intr_pf(dev);
2821 
2822 		/* unregister callback func to eal lib */
2823 		rte_intr_callback_unregister(intr_handle,
2824 			fm10k_dev_interrupt_handler_pf, (void *)dev);
2825 	} else {
2826 		/* disable interrupt */
2827 		fm10k_dev_disable_intr_vf(dev);
2828 
2829 		rte_intr_callback_unregister(intr_handle,
2830 			fm10k_dev_interrupt_handler_vf, (void *)dev);
2831 	}
2832 
2833 	return ret;
2834 }
2835 
2836 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2837 	.dev_configure		= fm10k_dev_configure,
2838 	.dev_start		= fm10k_dev_start,
2839 	.dev_stop		= fm10k_dev_stop,
2840 	.dev_close		= fm10k_dev_close,
2841 	.promiscuous_enable     = fm10k_dev_promiscuous_enable,
2842 	.promiscuous_disable    = fm10k_dev_promiscuous_disable,
2843 	.allmulticast_enable    = fm10k_dev_allmulticast_enable,
2844 	.allmulticast_disable   = fm10k_dev_allmulticast_disable,
2845 	.stats_get		= fm10k_stats_get,
2846 	.xstats_get		= fm10k_xstats_get,
2847 	.xstats_get_names	= fm10k_xstats_get_names,
2848 	.stats_reset		= fm10k_stats_reset,
2849 	.xstats_reset		= fm10k_stats_reset,
2850 	.link_update		= fm10k_link_update,
2851 	.dev_infos_get		= fm10k_dev_infos_get,
2852 	.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2853 	.vlan_filter_set	= fm10k_vlan_filter_set,
2854 	.vlan_offload_set	= fm10k_vlan_offload_set,
2855 	.mac_addr_add		= fm10k_macaddr_add,
2856 	.mac_addr_remove	= fm10k_macaddr_remove,
2857 	.rx_queue_start		= fm10k_dev_rx_queue_start,
2858 	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
2859 	.tx_queue_start		= fm10k_dev_tx_queue_start,
2860 	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
2861 	.rx_queue_setup		= fm10k_rx_queue_setup,
2862 	.rx_queue_release	= fm10k_rx_queue_release,
2863 	.tx_queue_setup		= fm10k_tx_queue_setup,
2864 	.tx_queue_release	= fm10k_tx_queue_release,
2865 	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
2866 	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
2867 	.reta_update		= fm10k_reta_update,
2868 	.reta_query		= fm10k_reta_query,
2869 	.rss_hash_update	= fm10k_rss_hash_update,
2870 	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
2871 };
2872 
2873 static int ftag_check_handler(__rte_unused const char *key,
2874 		const char *value, __rte_unused void *opaque)
2875 {
2876 	if (strcmp(value, "1"))
2877 		return -1;
2878 
2879 	return 0;
2880 }
2881 
2882 static int
2883 fm10k_check_ftag(struct rte_devargs *devargs)
2884 {
2885 	struct rte_kvargs *kvlist;
2886 	const char *ftag_key = "enable_ftag";
2887 
2888 	if (devargs == NULL)
2889 		return 0;
2890 
2891 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2892 	if (kvlist == NULL)
2893 		return 0;
2894 
2895 	if (!rte_kvargs_count(kvlist, ftag_key)) {
2896 		rte_kvargs_free(kvlist);
2897 		return 0;
2898 	}
2899 	/* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2900 	if (rte_kvargs_process(kvlist, ftag_key,
2901 				ftag_check_handler, NULL) < 0) {
2902 		rte_kvargs_free(kvlist);
2903 		return 0;
2904 	}
2905 	rte_kvargs_free(kvlist);
2906 
2907 	return 1;
2908 }
2909 
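/* Vector Tx burst wrapper: transmit in chunks of at most rs_thresh packets */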
2910 static uint16_t
2911 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2912 		    uint16_t nb_pkts)
2913 {
2914 	uint16_t nb_tx = 0;
2915 	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2916 
2917 	while (nb_pkts) {
2918 		uint16_t ret, num;
2919 
2920 		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2921 		ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2922 						 num);
2923 		nb_tx += ret;
2924 		nb_pkts -= ret;
2925 		if (ret < num)
2926 			break;
2927 	}
2928 
2929 	return nb_tx;
2930 }
2931 
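/* Select the vector or scalar Tx burst function based on queue conditions
 * and the supported SIMD bitwidth.
 */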
2932 static void __rte_cold
2933 fm10k_set_tx_function(struct rte_eth_dev *dev)
2934 {
2935 	struct fm10k_tx_queue *txq;
2936 	int i;
2937 	int use_sse = 1;
2938 	uint16_t tx_ftag_en = 0;
2939 
2940 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2941 		/* primary process has set the ftag flag and offloads */
2942 		txq = dev->data->tx_queues[0];
2943 		if (fm10k_tx_vec_condition_check(txq) ||
2944 				rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
2945 			dev->tx_pkt_burst = fm10k_xmit_pkts;
2946 			dev->tx_pkt_prepare = fm10k_prep_pkts;
2947 			PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2948 		} else {
2949 			PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2950 			dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2951 			dev->tx_pkt_prepare = NULL;
2952 		}
2953 		return;
2954 	}
2955 
2956 	if (fm10k_check_ftag(dev->device->devargs))
2957 		tx_ftag_en = 1;
2958 
2959 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2960 		txq = dev->data->tx_queues[i];
2961 		txq->tx_ftag_en = tx_ftag_en;
2962 		/* Check if Vector Tx is satisfied */
2963 		if (fm10k_tx_vec_condition_check(txq) ||
2964 				rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
2965 			use_sse = 0;
2966 	}
2967 
2968 	if (use_sse) {
2969 		PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2970 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
2971 			txq = dev->data->tx_queues[i];
2972 			fm10k_txq_vec_setup(txq);
2973 		}
2974 		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2975 		dev->tx_pkt_prepare = NULL;
2976 	} else {
2977 		dev->tx_pkt_burst = fm10k_xmit_pkts;
2978 		dev->tx_pkt_prepare = fm10k_prep_pkts;
2979 		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2980 	}
2981 }
2982 
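/* Select the vector or scalar (scattered) Rx burst function based on device
 * conditions, FTAG usage and the supported SIMD bitwidth.
 */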
2983 static void __rte_cold
2984 fm10k_set_rx_function(struct rte_eth_dev *dev)
2985 {
2986 	struct fm10k_dev_info *dev_info =
2987 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2988 	uint16_t i, rx_using_sse;
2989 	uint16_t rx_ftag_en = 0;
2990 
2991 	if (fm10k_check_ftag(dev->device->devargs))
2992 		rx_ftag_en = 1;
2993 
2994 	/* In order to allow Vector Rx there are a few configuration
2995 	 * conditions to be met.
2996 	 */
2997 	if (!fm10k_rx_vec_condition_check(dev) &&
2998 			dev_info->rx_vec_allowed && !rx_ftag_en &&
2999 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3000 		if (dev->data->scattered_rx)
3001 			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
3002 		else
3003 			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
3004 	} else if (dev->data->scattered_rx)
3005 		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
3006 	else
3007 		dev->rx_pkt_burst = fm10k_recv_pkts;
3008 
3009 	rx_using_sse =
3010 		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
3011 		dev->rx_pkt_burst == fm10k_recv_pkts_vec);
3012 
3013 	if (rx_using_sse)
3014 		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
3015 	else
3016 		PMD_INIT_LOG(DEBUG, "Use regular Rx func");
3017 
3018 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3019 		return;
3020 
3021 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
3022 		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
3023 
3024 		rxq->rx_using_sse = rx_using_sse;
3025 		rxq->rx_ftag_en = rx_ftag_en;
3026 	}
3027 }
3028 
3029 static void
3030 fm10k_params_init(struct rte_eth_dev *dev)
3031 {
3032 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3033 	struct fm10k_dev_info *info =
3034 		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
3035 
3036 	/* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
3037 	 * there is no way to get link status without reading BAR4.  Until this
3038 	 * works, assume we have maximum bandwidth.
3039 	 * @todo - fix bus info
3040 	 */
3041 	hw->bus_caps.speed = fm10k_bus_speed_8000;
3042 	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
3043 	hw->bus_caps.payload = fm10k_bus_payload_512;
3044 	hw->bus.speed = fm10k_bus_speed_8000;
3045 	hw->bus.width = fm10k_bus_width_pcie_x8;
3046 	hw->bus.payload = fm10k_bus_payload_256;
3047 
3048 	info->rx_vec_allowed = true;
3049 	info->sm_down = false;
3050 }
3051 
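/* Device init: set up ops and burst functions, initialize the shared code and
 * hardware, read the MAC address, start the mailbox service and interrupts,
 * and wait for the switch manager before enabling the logical port.
 */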
3052 static int
3053 eth_fm10k_dev_init(struct rte_eth_dev *dev)
3054 {
3055 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3056 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3057 	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3058 	int diag, i;
3059 	struct fm10k_macvlan_filter_info *macvlan;
3060 
3061 	PMD_INIT_FUNC_TRACE();
3062 
3063 	dev->dev_ops = &fm10k_eth_dev_ops;
3064 	dev->rx_queue_count = fm10k_dev_rx_queue_count;
3065 	dev->rx_descriptor_done	= fm10k_dev_rx_descriptor_done;
3066 	dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
3067 	dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
3068 	dev->rx_pkt_burst = &fm10k_recv_pkts;
3069 	dev->tx_pkt_burst = &fm10k_xmit_pkts;
3070 	dev->tx_pkt_prepare = &fm10k_prep_pkts;
3071 
3072 	/*
3073 	 * The primary process does the whole initialization; secondary
3074 	 * processes just select the same Rx and Tx functions as the primary.
3075 	 */
3076 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3077 		fm10k_set_rx_function(dev);
3078 		fm10k_set_tx_function(dev);
3079 		return 0;
3080 	}
3081 
3082 	rte_eth_copy_pci_info(dev, pdev);
3083 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3084 
3085 	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3086 	memset(macvlan, 0, sizeof(*macvlan));
3087 	/* Vendor and Device ID need to be set before init of shared code */
3088 	memset(hw, 0, sizeof(*hw));
3089 	hw->device_id = pdev->id.device_id;
3090 	hw->vendor_id = pdev->id.vendor_id;
3091 	hw->subsystem_device_id = pdev->id.subsystem_device_id;
3092 	hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3093 	hw->revision_id = 0;
3094 	hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3095 	if (hw->hw_addr == NULL) {
3096 		PMD_INIT_LOG(ERR, "Bad mem resource."
3097 			" Try to refuse unused devices.");
3098 		return -EIO;
3099 	}
3100 
3101 	/* Store fm10k_adapter pointer */
3102 	hw->back = dev->data->dev_private;
3103 
3104 	/* Initialize the shared code */
3105 	diag = fm10k_init_shared_code(hw);
3106 	if (diag != FM10K_SUCCESS) {
3107 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3108 		return -EIO;
3109 	}
3110 
3111 	/* Initialize parameters */
3112 	fm10k_params_init(dev);
3113 
3114 	/* Initialize the hw */
3115 	diag = fm10k_init_hw(hw);
3116 	if (diag != FM10K_SUCCESS) {
3117 		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3118 		return -EIO;
3119 	}
3120 
3121 	/* Initialize MAC address(es) */
3122 	dev->data->mac_addrs = rte_zmalloc("fm10k",
3123 			RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3124 	if (dev->data->mac_addrs == NULL) {
3125 		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3126 		return -ENOMEM;
3127 	}
3128 
3129 	diag = fm10k_read_mac_addr(hw);
3130 
3131 	rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3132 			&dev->data->mac_addrs[0]);
3133 
3134 	if (diag != FM10K_SUCCESS ||
3135 		!rte_is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3136 
3137 		/* Generate a random addr */
3138 		rte_eth_random_addr(hw->mac.addr);
3139 		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3140 		rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3141 		&dev->data->mac_addrs[0]);
3142 	}
3143 
3144 	/* Reset the hw statistics */
3145 	diag = fm10k_stats_reset(dev);
3146 	if (diag != 0) {
3147 		PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
3148 		return diag;
3149 	}
3150 
3151 	/* Reset the hw */
3152 	diag = fm10k_reset_hw(hw);
3153 	if (diag != FM10K_SUCCESS) {
3154 		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3155 		return -EIO;
3156 	}
3157 
3158 	/* Setup mailbox service */
3159 	diag = fm10k_setup_mbx_service(hw);
3160 	if (diag != FM10K_SUCCESS) {
3161 		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3162 		return -EIO;
3163 	}
3164 
3165 	/* PF and VF have different interrupt handling mechanisms */
3166 	if (hw->mac.type == fm10k_mac_pf) {
3167 		/* register callback func to eal lib */
3168 		rte_intr_callback_register(intr_handle,
3169 			fm10k_dev_interrupt_handler_pf, (void *)dev);
3170 
3171 		/* enable MISC interrupt */
3172 		fm10k_dev_enable_intr_pf(dev);
3173 	} else { /* VF */
3174 		rte_intr_callback_register(intr_handle,
3175 			fm10k_dev_interrupt_handler_vf, (void *)dev);
3176 
3177 		fm10k_dev_enable_intr_vf(dev);
3178 	}
3179 
3180 	/* Enable intr after callback registered */
3181 	rte_intr_enable(intr_handle);
3182 
3183 	hw->mac.ops.update_int_moderator(hw);
3184 
3185 	/* Make sure Switch Manager is ready before going forward. */
3186 	if (hw->mac.type == fm10k_mac_pf) {
3187 		bool switch_ready = false;
3188 
3189 		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3190 			fm10k_mbx_lock(hw);
3191 			hw->mac.ops.get_host_state(hw, &switch_ready);
3192 			fm10k_mbx_unlock(hw);
3193 			if (switch_ready == true)
3194 				break;
3195 			/* Delay some time to acquire async LPORT_MAP info. */
3196 			rte_delay_us(WAIT_SWITCH_MSG_US);
3197 		}
3198 
3199 		if (switch_ready == false) {
3200 			PMD_INIT_LOG(ERR, "switch is not ready");
3201 			return -1;
3202 		}
3203 	}
3204 
3205 	/*
3206 	 * The calls below trigger operations on the mailbox; acquire the lock
3207 	 * to avoid a race condition with the interrupt handler. Operations on
3208 	 * the mailbox FIFO raise an interrupt to the PF/SM, whose handler
3209 	 * processes it and generates an interrupt back to our side, which then
3210 	 * touches the mailbox FIFO again.
3211 	 */
3212 	fm10k_mbx_lock(hw);
3213 	/* Enable port first */
3214 	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3215 					MAX_LPORT_NUM, 1);
3216 
3217 	/* Set unicast mode by default. The application can change to another
3218 	 * mode through other API functions.
3219 	 */
3220 	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3221 					FM10K_XCAST_MODE_NONE);
3222 
3223 	fm10k_mbx_unlock(hw);
3224 
3225 	/* Make sure default VID is ready before going forward. */
3226 	if (hw->mac.type == fm10k_mac_pf) {
3227 		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3228 			if (hw->mac.default_vid)
3229 				break;
3230 			/* Delay some time to acquire async port VLAN info. */
3231 			rte_delay_us(WAIT_SWITCH_MSG_US);
3232 		}
3233 
3234 		if (!hw->mac.default_vid) {
3235 			PMD_INIT_LOG(ERR, "default VID is not ready");
3236 			return -1;
3237 		}
3238 	}
3239 
3240 	/* Add default mac address */
3241 	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3242 		MAIN_VSI_POOL_NUMBER);
3243 
3244 	return 0;
3245 }
3246 
3247 static int
3248 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3249 {
3250 	PMD_INIT_FUNC_TRACE();
3251 	fm10k_dev_close(dev);
3252 	return 0;
3253 }
3254 
3255 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3256 	struct rte_pci_device *pci_dev)
3257 {
3258 	return rte_eth_dev_pci_generic_probe(pci_dev,
3259 		sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3260 }
3261 
3262 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3263 {
3264 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3265 }
3266 
3267 /*
3268  * The set of PCI devices this driver supports. This driver will enable both PF
3269  * and SRIOV-VF devices.
3270  */
3271 static const struct rte_pci_id pci_id_fm10k_map[] = {
3272 	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3273 	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3274 	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3275 	{ .vendor_id = 0, /* sentinel */ },
3276 };
3277 
3278 static struct rte_pci_driver rte_pmd_fm10k = {
3279 	.id_table = pci_id_fm10k_map,
3280 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3281 	.probe = eth_fm10k_pci_probe,
3282 	.remove = eth_fm10k_pci_remove,
3283 };
3284 
3285 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3286 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3287 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
3288 RTE_LOG_REGISTER(fm10k_logtype_init, pmd.net.fm10k.init, NOTICE);
3289 RTE_LOG_REGISTER(fm10k_logtype_driver, pmd.net.fm10k.driver, NOTICE);
3290 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
3291 RTE_LOG_REGISTER(fm10k_logtype_rx, pmd.net.fm10k.rx, DEBUG);
3292 #endif
3293 #ifdef RTE_LIBRTE_FM10K_DEBUG_TX
3294 RTE_LOG_REGISTER(fm10k_logtype_tx, pmd.net.fm10k.tx, DEBUG);
3295 #endif
3296 #ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
3297 RTE_LOG_REGISTER(fm10k_logtype_tx_free, pmd.net.fm10k.tx_free, DEBUG);
3298 #endif
3299