xref: /f-stack/dpdk/drivers/net/atlantic/atl_ethdev.c (revision fa64a7ff)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_ethdev_pci.h>
6 
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14 
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17 
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 			       struct rte_eth_dev_info *dev_info);
48 
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
52 
53 /* VLAN stuff */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 		uint16_t vlan_id, int on);
56 
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 				     uint16_t queue_id, int on);
61 
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 			     enum rte_vlan_type vlan_type, uint16_t tpid);
64 
65 /* EEPROM */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 			      struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 
72 /* Regs */
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 			    struct rte_dev_reg_info *regs);
75 
76 /* Flow control */
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 			       struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
83 
84 /* Interrupts */
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 				    struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
91 
92 
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 			    struct ether_addr *mac_addr,
95 			    uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 					   struct ether_addr *mac_addr);
99 
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 				    struct ether_addr *mc_addr_set,
102 				    uint32_t nb_mc_addr);
103 
104 /* RSS */
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 			     struct rte_eth_rss_reta_entry64 *reta_conf,
107 			     uint16_t reta_size);
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 			    struct rte_eth_rss_reta_entry64 *reta_conf,
110 			    uint16_t reta_size);
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 				 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 				   struct rte_eth_rss_conf *rss_conf);
115 
116 
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 	struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
120 
/* Duplicate forward declaration of atl_dev_info_get removed;
 * it is already declared once near the top of this file.
 */
123 
124 int atl_logtype_init;
125 int atl_logtype_driver;
126 
127 /*
128  * The set of PCI devices this driver supports
129  */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Early/engineering device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC1xx production parts */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* AQC1xxS variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* AQC1xxE variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
155 
/* PCI driver glue: binds the device-ID table above to the probe/remove
 * entry points of this PMD.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	/* Requires BAR mapping, supports link-state-change interrupts and
	 * IOVA-as-VA operation.
	 */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
163 
/* Rx offload capabilities advertised in dev_info_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised in dev_info_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Size in bytes reported by atl_dev_get_eeprom_length(). */
#define SFP_EEPROM_SIZE 0x100

/* Rx descriptor ring limits exported via dev_info_get(). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx descriptor ring limits exported via dev_info_get(). */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
193 
/* Build one xstats table entry: the stringified field name plus the
 * byte offset of that field inside struct aq_stats_s.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* One extended-statistics descriptor: display name + offset into the
 * firmware statistics snapshot (hw->curr_stats).
 */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};

/* Extended statistics exported by xstats_get()/xstats_get_names(). */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
220 
/* Table of ethdev callbacks implemented by this PMD; installed on each
 * port in eth_atl_dev_init().
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats; note xstats_reset shares the same handler as stats_reset */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control (implemented in the companion rx/tx file) */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	/* RSS */
	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
300 
/* Thin wrapper: the full hardware reset is delegated to the B0 layer. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	int32_t err;

	err = hw_atl_b0_hw_reset(hw);
	return err;
}
306 
307 static inline void
308 atl_enable_intr(struct rte_eth_dev *dev)
309 {
310 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
311 
312 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
313 }
314 
/* Mask (disable) all interrupts in the low interrupt-mask word. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
321 
322 static int
323 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
324 {
325 	struct atl_adapter *adapter = eth_dev->data->dev_private;
326 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
327 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
328 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
329 	int err = 0;
330 
331 	PMD_INIT_FUNC_TRACE();
332 
333 	eth_dev->dev_ops = &atl_eth_dev_ops;
334 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
335 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
336 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
337 
338 	/* For secondary processes, the primary process has done all the work */
339 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
340 		return 0;
341 
342 	/* Vendor and Device ID need to be set before init of shared code */
343 	hw->device_id = pci_dev->id.device_id;
344 	hw->vendor_id = pci_dev->id.vendor_id;
345 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
346 
347 	/* Hardware configuration - hardcode */
348 	adapter->hw_cfg.is_lro = false;
349 	adapter->hw_cfg.wol = false;
350 	adapter->hw_cfg.is_rss = false;
351 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
352 
353 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
354 			  AQ_NIC_RATE_5G |
355 			  AQ_NIC_RATE_2G5 |
356 			  AQ_NIC_RATE_1G |
357 			  AQ_NIC_RATE_100M;
358 
359 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
360 	adapter->hw_cfg.aq_rss.indirection_table_size =
361 		HW_ATL_B0_RSS_REDIRECTION_MAX;
362 
363 	hw->aq_nic_cfg = &adapter->hw_cfg;
364 
365 	/* disable interrupt */
366 	atl_disable_intr(hw);
367 
368 	/* Allocate memory for storing MAC addresses */
369 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
370 	if (eth_dev->data->mac_addrs == NULL) {
371 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
372 		return -ENOMEM;
373 	}
374 
375 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
376 	if (err)
377 		return err;
378 
379 	/* Copy the permanent MAC address */
380 	if (hw->aq_fw_ops->get_mac_permanent(hw,
381 			eth_dev->data->mac_addrs->addr_bytes) != 0)
382 		return -EINVAL;
383 
384 	/* Reset the hw statistics */
385 	atl_dev_stats_reset(eth_dev);
386 
387 	rte_intr_callback_register(intr_handle,
388 				   atl_dev_interrupt_handler, eth_dev);
389 
390 	/* enable uio/vfio intr/eventfd mapping */
391 	rte_intr_enable(intr_handle);
392 
393 	/* enable support intr */
394 	atl_enable_intr(eth_dev);
395 
396 	return err;
397 }
398 
/* Per-port teardown: stop the device if still running, detach the burst
 * functions, unhook the interrupt handler and free the MAC-address array.
 * Primary process only; secondary processes get -EPERM.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* atl_dev_stop() sets adapter_stopped; only close if still running */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
430 
431 static int
432 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
433 	struct rte_pci_device *pci_dev)
434 {
435 	return rte_eth_dev_pci_generic_probe(pci_dev,
436 		sizeof(struct atl_adapter), eth_atl_dev_init);
437 }
438 
439 static int
440 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
441 {
442 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
443 }
444 
445 static int
446 atl_dev_configure(struct rte_eth_dev *dev)
447 {
448 	struct atl_interrupt *intr =
449 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
450 
451 	PMD_INIT_FUNC_TRACE();
452 
453 	/* set flag to update link status after init */
454 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
455 
456 	return 0;
457 }
458 
459 /*
460  * Configure device link speed and setup link.
461  * It returns 0 on success.
462  */
463 static int
464 atl_dev_start(struct rte_eth_dev *dev)
465 {
466 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
467 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
468 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
469 	uint32_t intr_vector = 0;
470 	int status;
471 	int err;
472 
473 	PMD_INIT_FUNC_TRACE();
474 
475 	/* set adapter started */
476 	hw->adapter_stopped = 0;
477 
478 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
479 		PMD_INIT_LOG(ERR,
480 		"Invalid link_speeds for port %u, fix speed not supported",
481 				dev->data->port_id);
482 		return -EINVAL;
483 	}
484 
485 	/* disable uio/vfio intr/eventfd mapping */
486 	rte_intr_disable(intr_handle);
487 
488 	/* reinitialize adapter
489 	 * this calls reset and start
490 	 */
491 	status = atl_reset_hw(hw);
492 	if (status != 0)
493 		return -EIO;
494 
495 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
496 
497 	hw_atl_b0_hw_start(hw);
498 	/* check and configure queue intr-vector mapping */
499 	if ((rte_intr_cap_multiple(intr_handle) ||
500 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
501 	    dev->data->dev_conf.intr_conf.rxq != 0) {
502 		intr_vector = dev->data->nb_rx_queues;
503 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
504 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
505 					ATL_MAX_INTR_QUEUE_NUM);
506 			return -ENOTSUP;
507 		}
508 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
509 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
510 			return -1;
511 		}
512 	}
513 
514 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
515 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
516 				    dev->data->nb_rx_queues * sizeof(int), 0);
517 		if (intr_handle->intr_vec == NULL) {
518 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
519 				     " intr_vec", dev->data->nb_rx_queues);
520 			return -ENOMEM;
521 		}
522 	}
523 
524 	/* initialize transmission unit */
525 	atl_tx_init(dev);
526 
527 	/* This can fail when allocating mbufs for descriptor rings */
528 	err = atl_rx_init(dev);
529 	if (err) {
530 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
531 		goto error;
532 	}
533 
534 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
535 		hw->fw_ver_actual >> 24,
536 		(hw->fw_ver_actual >> 16) & 0xFF,
537 		hw->fw_ver_actual & 0xFFFF);
538 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
539 
540 	err = atl_start_queues(dev);
541 	if (err < 0) {
542 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
543 		goto error;
544 	}
545 
546 	err = atl_dev_set_link_up(dev);
547 
548 	err = hw->aq_fw_ops->update_link_status(hw);
549 
550 	if (err)
551 		goto error;
552 
553 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
554 
555 	if (err)
556 		goto error;
557 
558 	if (rte_intr_allow_others(intr_handle)) {
559 		/* check if lsc interrupt is enabled */
560 		if (dev->data->dev_conf.intr_conf.lsc != 0)
561 			atl_dev_lsc_interrupt_setup(dev, true);
562 		else
563 			atl_dev_lsc_interrupt_setup(dev, false);
564 	} else {
565 		rte_intr_callback_unregister(intr_handle,
566 					     atl_dev_interrupt_handler, dev);
567 		if (dev->data->dev_conf.intr_conf.lsc != 0)
568 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
569 				     " no intr multiplex");
570 	}
571 
572 	/* check if rxq interrupt is enabled */
573 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
574 	    rte_intr_dp_is_en(intr_handle))
575 		atl_dev_rxq_interrupt_setup(dev);
576 
577 	/* enable uio/vfio intr/eventfd mapping */
578 	rte_intr_enable(intr_handle);
579 
580 	/* resume enabled intr since hw reset */
581 	atl_enable_intr(dev);
582 
583 	return 0;
584 
585 error:
586 	atl_stop_queues(dev);
587 	return -EIO;
588 }
589 
590 /*
591  * Stop device: disable rx and tx functions to allow for reconfiguring.
592  */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	/* mark stopped so eth_atl_dev_uninit() won't close twice */
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status (report link down) */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	/* free the queue->vector map allocated in atl_dev_start() */
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
634 
635 /*
636  * Set device link up: enable tx.
637  */
638 static int
639 atl_dev_set_link_up(struct rte_eth_dev *dev)
640 {
641 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
643 	uint32_t speed_mask = 0;
644 
645 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
646 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
647 	} else {
648 		if (link_speeds & ETH_LINK_SPEED_10G)
649 			speed_mask |= AQ_NIC_RATE_10G;
650 		if (link_speeds & ETH_LINK_SPEED_5G)
651 			speed_mask |= AQ_NIC_RATE_5G;
652 		if (link_speeds & ETH_LINK_SPEED_1G)
653 			speed_mask |= AQ_NIC_RATE_1G;
654 		if (link_speeds & ETH_LINK_SPEED_2_5G)
655 			speed_mask |=  AQ_NIC_RATE_2G5;
656 		if (link_speeds & ETH_LINK_SPEED_100M)
657 			speed_mask |= AQ_NIC_RATE_100M;
658 	}
659 
660 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
661 }
662 
663 /*
664  * Set device link down: disable tx.
665  */
666 static int
667 atl_dev_set_link_down(struct rte_eth_dev *dev)
668 {
669 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
670 
671 	return hw->aq_fw_ops->set_link_speed(hw, 0);
672 }
673 
674 /*
675  * Reset and stop device.
676  */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	/* stop() disables interrupts, resets the NIC and stops queues */
	atl_dev_stop(dev);

	atl_free_queues(dev);
}
686 
/* dev_reset callback: run a full uninit/init cycle on the port.
 * Returns 0 on success, a negative value on failure.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err = eth_atl_dev_uninit(dev);

	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
700 
701 
/* stats_get callback: refresh the firmware counter snapshot and copy it,
 * plus the driver-maintained per-queue software counters, into *stats.
 * Always returns 0.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* pull fresh counters from firmware into hw->curr_stats */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* per-queue counters come from the software side only */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
733 
734 static void
735 atl_dev_stats_reset(struct rte_eth_dev *dev)
736 {
737 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
738 	struct aq_hw_s *hw = &adapter->hw;
739 
740 	hw->aq_fw_ops->update_stats(hw);
741 
742 	/* Reset software totals */
743 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
744 
745 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
746 }
747 
748 static int
749 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
750 			 struct rte_eth_xstat_name *xstats_names,
751 			 unsigned int size)
752 {
753 	unsigned int i;
754 
755 	if (!xstats_names)
756 		return RTE_DIM(atl_xstats_tbl);
757 
758 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
759 		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
760 			atl_xstats_tbl[i].name);
761 
762 	return i;
763 }
764 
765 static int
766 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
767 		   unsigned int n)
768 {
769 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
770 	struct aq_hw_s *hw = &adapter->hw;
771 	unsigned int i;
772 
773 	if (!stats)
774 		return 0;
775 
776 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
777 		stats[i].id = i;
778 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
779 					atl_xstats_tbl[i].offset);
780 	}
781 
782 	return i;
783 }
784 
785 static int
786 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
787 {
788 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
789 	uint32_t fw_ver = 0;
790 	unsigned int ret = 0;
791 
792 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
793 	if (ret)
794 		return -EIO;
795 
796 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
797 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
798 
799 	ret += 1; /* add string null-terminator */
800 
801 	if (fw_size < ret)
802 		return ret;
803 
804 	return 0;
805 }
806 
/* dev_infos_get callback: report queue limits, offload capabilities,
 * descriptor limits, RSS parameters and supported link speeds.
 */
static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* no MAC hash filtering or VMDq support */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;


	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	/* 100M / 1G / 2.5G / 5G / 10G */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
849 
850 static const uint32_t *
851 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
852 {
853 	static const uint32_t ptypes[] = {
854 		RTE_PTYPE_L2_ETHER,
855 		RTE_PTYPE_L2_ETHER_ARP,
856 		RTE_PTYPE_L2_ETHER_VLAN,
857 		RTE_PTYPE_L3_IPV4,
858 		RTE_PTYPE_L3_IPV6,
859 		RTE_PTYPE_L4_TCP,
860 		RTE_PTYPE_L4_UDP,
861 		RTE_PTYPE_L4_SCTP,
862 		RTE_PTYPE_L4_ICMP,
863 		RTE_PTYPE_UNKNOWN
864 	};
865 
866 	if (dev->rx_pkt_burst == atl_recv_pkts)
867 		return ptypes;
868 
869 	return NULL;
870 }
871 
872 /* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* start from a "link down" template */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* on FW query failure, report "changed" without touching state */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* link is up: the pending link-config request has been satisfied */
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	return 0;
}
928 
929 static void
930 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
931 {
932 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933 
934 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
935 }
936 
937 static void
938 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
939 {
940 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
941 
942 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
943 }
944 
945 static void
946 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
947 {
948 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949 
950 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
951 }
952 
/* allmulticast_disable callback: stop accepting all multicast frames,
 * unless promiscuous mode is active (which implies all-multicast).
 */
static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* all-multicast must stay on while promiscuous */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}
963 
964 /**
965  * It clears the interrupt causes and enables the interrupt.
966  * It will be called once only during nic initialized.
967  *
968  * @param dev
969  *  Pointer to struct rte_eth_dev.
970  * @param on
971  *  Enable or Disable.
972  *
973  * @return
974  *  - On success, zero.
975  *  - On failure, a negative value.
976  */
977 
978 static int
979 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
980 {
981 	atl_dev_link_status_print(dev);
982 	return 0;
983 }
984 
/* Rx-queue interrupt setup: nothing to program here; always succeeds. */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
990 
991 
/* Read and acknowledge the pending interrupt cause, mask further
 * interrupts, and record whether a link-status update is needed.
 * Always returns 0.
 */
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* mask interrupts until atl_dev_interrupt_action() re-enables them */
	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
			ATL_FLAG_NEED_LINK_UPDATE : 0;

	return 0;
}
1008 
1009 /**
1010  * It gets and then prints the link status.
1011  *
1012  * @param dev
1013  *  Pointer to struct rte_eth_dev.
1014  *
 * @return
 *  void
1018  */
1019 static void
1020 atl_dev_link_status_print(struct rte_eth_dev *dev)
1021 {
1022 	struct rte_eth_link link;
1023 
1024 	memset(&link, 0, sizeof(link));
1025 	rte_eth_linkstatus_get(dev, &link);
1026 	if (link.link_status) {
1027 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1028 					(int)(dev->data->port_id),
1029 					(unsigned int)link.link_speed,
1030 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1031 					"full-duplex" : "half-duplex");
1032 	} else {
1033 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1034 				(int)(dev->data->port_id));
1035 	}
1036 
1037 
1038 #ifdef DEBUG
1039 {
1040 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1041 
1042 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1043 				pci_dev->addr.domain,
1044 				pci_dev->addr.bus,
1045 				pci_dev->addr.devid,
1046 				pci_dev->addr.function);
1047 }
1048 #endif
1049 
1050 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1051 }
1052 
1053 /*
1054  * It executes link_update after knowing an interrupt occurred.
1055  *
1056  * @param dev
1057  *  Pointer to struct rte_eth_dev.
1058  *
1059  * @return
1060  *  - On success, zero.
1061  *  - On failure, a negative value.
1062  */
1063 static int
1064 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1065 			   struct rte_intr_handle *intr_handle)
1066 {
1067 	struct atl_interrupt *intr =
1068 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1069 
1070 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1071 		atl_dev_link_update(dev, 0);
1072 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1073 		atl_dev_link_status_print(dev);
1074 		_rte_eth_dev_callback_process(dev,
1075 			RTE_ETH_EVENT_INTR_LSC, NULL);
1076 	}
1077 
1078 	atl_enable_intr(dev);
1079 	rte_intr_enable(intr_handle);
1080 
1081 	return 0;
1082 }
1083 
1084 /**
1085  * Interrupt handler triggered by NIC  for handling
1086  * specific interrupt.
1087  *
1088  * @param handle
1089  *  Pointer to interrupt handle.
1090  * @param param
1091  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1092  *
1093  * @return
1094  *  void
1095  */
1096 static void
1097 atl_dev_interrupt_handler(void *param)
1098 {
1099 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1100 
1101 	atl_dev_interrupt_get_status(dev);
1102 	atl_dev_interrupt_action(dev, dev->intr_handle);
1103 }
1104 
/* Report the size of the module EEPROM exposed through the eeprom API. */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1110 
1111 static int
1112 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1113 {
1114 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1115 	int dev_addr = SMBUS_DEVICE_ID;
1116 
1117 	if (hw->aq_fw_ops->get_eeprom == NULL)
1118 		return -ENOTSUP;
1119 
1120 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1121 	    eeprom->data == NULL)
1122 		return -EINVAL;
1123 
1124 	if (eeprom->magic > 0x7F)
1125 		return -EINVAL;
1126 
1127 	if (eeprom->magic)
1128 		dev_addr = eeprom->magic;
1129 
1130 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1131 					 eeprom->length, eeprom->offset);
1132 }
1133 
1134 static int
1135 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1136 {
1137 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1138 	int dev_addr = SMBUS_DEVICE_ID;
1139 
1140 	if (hw->aq_fw_ops->set_eeprom == NULL)
1141 		return -ENOTSUP;
1142 
1143 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1144 	    eeprom->data == NULL)
1145 		return -EINVAL;
1146 
1147 	if (eeprom->magic > 0x7F)
1148 		return -EINVAL;
1149 
1150 	if (eeprom->magic)
1151 		dev_addr = eeprom->magic;
1152 
1153 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1154 					 eeprom->length, eeprom->offset);
1155 }
1156 
1157 static int
1158 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1159 {
1160 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1161 	u32 mif_id;
1162 	int err;
1163 
1164 	if (regs->data == NULL) {
1165 		regs->length = hw_atl_utils_hw_get_reg_length();
1166 		regs->width = sizeof(u32);
1167 		return 0;
1168 	}
1169 
1170 	/* Only full register dump is supported */
1171 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1172 		return -ENOTSUP;
1173 
1174 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1175 
1176 	/* Device version */
1177 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1178 	regs->version = mif_id & 0xFFU;
1179 
1180 	return err;
1181 }
1182 
1183 static int
1184 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1185 {
1186 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1187 	u32 fc = AQ_NIC_FC_OFF;
1188 
1189 	if (hw->aq_fw_ops->get_flow_control == NULL)
1190 		return -ENOTSUP;
1191 
1192 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1193 
1194 	if (fc == AQ_NIC_FC_OFF)
1195 		fc_conf->mode = RTE_FC_NONE;
1196 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1197 		fc_conf->mode = RTE_FC_FULL;
1198 	else if (fc & AQ_NIC_FC_RX)
1199 		fc_conf->mode = RTE_FC_RX_PAUSE;
1200 	else if (fc & AQ_NIC_FC_TX)
1201 		fc_conf->mode = RTE_FC_TX_PAUSE;
1202 	return 0;
1203 }
1204 
1205 static int
1206 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1207 {
1208 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1209 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1210 
1211 
1212 	if (hw->aq_fw_ops->set_flow_control == NULL)
1213 		return -ENOTSUP;
1214 
1215 	if (fc_conf->mode == RTE_FC_NONE)
1216 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1217 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1218 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1219 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1220 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1221 	else if (fc_conf->mode == RTE_FC_FULL)
1222 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1223 
1224 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1225 		return hw->aq_fw_ops->set_flow_control(hw);
1226 
1227 	return 0;
1228 }
1229 
1230 static int
1231 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1232 		    u8 *mac_addr, bool enable)
1233 {
1234 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235 	unsigned int h = 0U;
1236 	unsigned int l = 0U;
1237 	int err;
1238 
1239 	if (mac_addr) {
1240 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1241 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1242 			(mac_addr[4] << 8) | mac_addr[5];
1243 	}
1244 
1245 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1246 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1247 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1248 
1249 	if (enable)
1250 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1251 
1252 	err = aq_hw_err_from_flags(hw);
1253 
1254 	return err;
1255 }
1256 
1257 static int
1258 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1259 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1260 {
1261 	if (is_zero_ether_addr(mac_addr)) {
1262 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1263 		return -EINVAL;
1264 	}
1265 
1266 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1267 }
1268 
/* Clear and disable the unicast filter slot at 'index'. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1274 
/**
 * Replace the primary MAC address (unicast filter slot 0).
 *
 * @return 0 on success, or a negative errno when programming the new
 *  address fails (e.g. -EINVAL for the all-zero address).
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	/* Propagate the result instead of unconditionally returning 0,
	 * so callers learn when the new address was not applied.
	 */
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1282 
1283 static int
1284 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1285 {
1286 	struct rte_eth_dev_info dev_info;
1287 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1288 
1289 	atl_dev_info_get(dev, &dev_info);
1290 
1291 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1292 		return -EINVAL;
1293 
1294 	/* update max frame size */
1295 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1296 
1297 	return 0;
1298 }
1299 
/* Add (on != 0) or remove (on == 0) a hardware VLAN-ID filter entry.
 *
 * The driver keeps a shadow table (cfg->vlan_filter) mirroring the
 * hardware slots; VLAN promiscuous mode is enabled whenever that table
 * ends up empty so untagged/unmatched VLAN traffic still flows.
 *
 * Returns 0 on success, -ENOMEM when all filter slots are occupied.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing slot holding this VLAN_ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the free slot: shadow entry first, then hardware. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1360 
1361 static int
1362 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1363 {
1364 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1365 	struct aq_hw_cfg_s *cfg =
1366 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1367 	int i;
1368 
1369 	PMD_INIT_FUNC_TRACE();
1370 
1371 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1372 		if (cfg->vlan_filter[i])
1373 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1374 	}
1375 	return 0;
1376 }
1377 
1378 static int
1379 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1380 {
1381 	struct aq_hw_cfg_s *cfg =
1382 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1383 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1384 	int ret = 0;
1385 	int i;
1386 
1387 	PMD_INIT_FUNC_TRACE();
1388 
1389 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1390 
1391 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1392 
1393 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1394 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1395 
1396 	if (mask & ETH_VLAN_EXTEND_MASK)
1397 		ret = -ENOTSUP;
1398 
1399 	return ret;
1400 }
1401 
1402 static int
1403 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1404 		  uint16_t tpid)
1405 {
1406 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1407 	int err = 0;
1408 
1409 	PMD_INIT_FUNC_TRACE();
1410 
1411 	switch (vlan_type) {
1412 	case ETH_VLAN_TYPE_INNER:
1413 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1414 		break;
1415 	case ETH_VLAN_TYPE_OUTER:
1416 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1417 		break;
1418 	default:
1419 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1420 		err = -ENOTSUP;
1421 	}
1422 
1423 	return err;
1424 }
1425 
1426 static void
1427 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1428 {
1429 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1430 
1431 	PMD_INIT_FUNC_TRACE();
1432 
1433 	if (queue_id > dev->data->nb_rx_queues) {
1434 		PMD_DRV_LOG(ERR, "Invalid queue id");
1435 		return;
1436 	}
1437 
1438 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1439 }
1440 
1441 static int
1442 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1443 			  struct ether_addr *mc_addr_set,
1444 			  uint32_t nb_mc_addr)
1445 {
1446 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1447 	u32 i;
1448 
1449 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1450 		return -EINVAL;
1451 
1452 	/* Update whole uc filters table */
1453 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1454 		u8 *mac_addr = NULL;
1455 		u32 l = 0, h = 0;
1456 
1457 		if (i < nb_mc_addr) {
1458 			mac_addr = mc_addr_set[i].addr_bytes;
1459 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1460 				(mac_addr[4] << 8) | mac_addr[5];
1461 			h = (mac_addr[0] << 8) | mac_addr[1];
1462 		}
1463 
1464 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1465 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1466 							HW_ATL_B0_MAC_MIN + i);
1467 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1468 							HW_ATL_B0_MAC_MIN + i);
1469 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1470 					   HW_ATL_B0_MAC_MIN + i);
1471 	}
1472 
1473 	return 0;
1474 }
1475 
/* Update the cached RSS indirection table and push it to hardware.
 *
 * NOTE(review): only reta_conf[0] is read and reta_conf->mask is
 * ignored — presumably the device's indirection table never exceeds one
 * 64-entry group; confirm against cf->aq_rss.indirection_table_size.
 */
static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	/* Clamp each entry to a valid Rx queue index. */
	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}
1492 
1493 static int
1494 atl_reta_query(struct rte_eth_dev *dev,
1495 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1496 		    uint16_t reta_size)
1497 {
1498 	int i;
1499 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1500 
1501 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1502 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1503 	reta_conf->mask = ~0U;
1504 	return 0;
1505 }
1506 
1507 static int
1508 atl_rss_hash_update(struct rte_eth_dev *dev,
1509 				 struct rte_eth_rss_conf *rss_conf)
1510 {
1511 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1512 	struct aq_hw_cfg_s *cfg =
1513 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1514 	static u8 def_rss_key[40] = {
1515 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1516 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1517 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1518 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1519 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1520 	};
1521 
1522 	cfg->is_rss = !!rss_conf->rss_hf;
1523 	if (rss_conf->rss_key) {
1524 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1525 		       rss_conf->rss_key_len);
1526 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1527 	} else {
1528 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1529 		       sizeof(def_rss_key));
1530 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1531 	}
1532 
1533 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1534 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1535 	return 0;
1536 }
1537 
1538 static int
1539 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1540 				 struct rte_eth_rss_conf *rss_conf)
1541 {
1542 	struct aq_hw_cfg_s *cfg =
1543 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1544 
1545 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1546 	if (rss_conf->rss_key) {
1547 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1548 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1549 		       rss_conf->rss_key_len);
1550 	}
1551 
1552 	return 0;
1553 }
1554 
/* Register the PMD with the PCI bus, export its PCI id table, and
 * declare the kernel modules it can bind to.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1558 
/* Constructor: register the driver's init/driver log types and default
 * both to NOTICE level (rte_log_register returns <0 on failure).
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1568 
1569