xref: /f-stack/dpdk/drivers/net/atlantic/atl_ethdev.c (revision 04b1440d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_ethdev_pci.h>
6 
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14 
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17 
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 			       struct rte_eth_dev_info *dev_info);
48 
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
52 
53 /* VLAN stuff */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 		uint16_t vlan_id, int on);
56 
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 				     uint16_t queue_id, int on);
61 
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 			     enum rte_vlan_type vlan_type, uint16_t tpid);
64 
65 /* EEPROM */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 			      struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 
72 /* Regs */
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 			    struct rte_dev_reg_info *regs);
75 
76 /* Flow control */
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 			       struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
83 
84 /* Interrupts */
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 				    struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
91 
92 
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 			    struct ether_addr *mac_addr,
95 			    uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 					   struct ether_addr *mac_addr);
99 
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 				    struct ether_addr *mc_addr_set,
102 				    uint32_t nb_mc_addr);
103 
104 /* RSS */
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 			     struct rte_eth_rss_reta_entry64 *reta_conf,
107 			     uint16_t reta_size);
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 			    struct rte_eth_rss_reta_entry64 *reta_conf,
110 			    uint16_t reta_size);
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 				 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 				   struct rte_eth_rss_conf *rss_conf);
115 
116 
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 	struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
120 
/* Note: a duplicate forward declaration of atl_dev_info_get was removed
 * here; the function is already declared earlier in this file.
 */
123 
/* EAL log type ids for init-time and driver-runtime messages.
 * NOTE(review): values are presumably assigned by a log-registration
 * constructor elsewhere in this file — not visible in this chunk.
 */
int atl_logtype_init;
int atl_logtype_driver;
126 
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	/* Zeroed entry terminates the table for the PCI bus scan. */
	{ .vendor_id = 0, /* sentinel */ },
};
155 
/* PCI driver descriptor: needs BAR mapping, supports link-state-change
 * interrupts and IOVA-as-VA operation.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
163 
/* RX offload capabilities advertised through dev_infos_get()
 * (rx_offload_capa).
 */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* TX offload capabilities advertised through dev_infos_get()
 * (tx_offload_capa).
 */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Presumably the SFP module EEPROM size in bytes used by the EEPROM
 * callbacks — the users of this constant are not visible in this chunk.
 */
#define SFP_EEPROM_SIZE 0x100
179 
/* RX descriptor ring limits reported in dev_infos_get(). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};
185 
/* TX descriptor ring limits reported in dev_infos_get(), including
 * per-packet segment limits used for TSO/multi-seg transmit.
 */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
193 
/* Builds one xstats table entry: the stat's name (stringized) plus its
 * byte offset inside struct aq_stats_s.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* One extended-statistic descriptor: exported name and offset into the
 * firmware statistics structure.
 */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};

/* Extended statistics served by atl_dev_xstats_get()/_get_names(). */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
220 
/* ethdev callback table wired into each port in eth_atl_dev_init().
 * Note stats_reset and xstats_reset intentionally share one handler.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
300 
/* Thin wrapper around the B0 hardware reset; returns its status code. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
306 
307 static inline void
308 atl_enable_intr(struct rte_eth_dev *dev)
309 {
310 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
311 
312 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
313 }
314 
/* Mask (disable) all interrupt causes via the lower mask-clear word. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
321 
322 static int
323 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
324 {
325 	struct atl_adapter *adapter =
326 		(struct atl_adapter *)eth_dev->data->dev_private;
327 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
328 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
329 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
330 	int err = 0;
331 
332 	PMD_INIT_FUNC_TRACE();
333 
334 	eth_dev->dev_ops = &atl_eth_dev_ops;
335 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
336 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
337 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
338 
339 	/* For secondary processes, the primary process has done all the work */
340 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
341 		return 0;
342 
343 	/* Vendor and Device ID need to be set before init of shared code */
344 	hw->device_id = pci_dev->id.device_id;
345 	hw->vendor_id = pci_dev->id.vendor_id;
346 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
347 
348 	/* Hardware configuration - hardcode */
349 	adapter->hw_cfg.is_lro = false;
350 	adapter->hw_cfg.wol = false;
351 	adapter->hw_cfg.is_rss = false;
352 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
353 
354 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
355 			  AQ_NIC_RATE_5G |
356 			  AQ_NIC_RATE_2G5 |
357 			  AQ_NIC_RATE_1G |
358 			  AQ_NIC_RATE_100M;
359 
360 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
361 	adapter->hw_cfg.aq_rss.indirection_table_size =
362 		HW_ATL_B0_RSS_REDIRECTION_MAX;
363 
364 	hw->aq_nic_cfg = &adapter->hw_cfg;
365 
366 	/* disable interrupt */
367 	atl_disable_intr(hw);
368 
369 	/* Allocate memory for storing MAC addresses */
370 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
371 	if (eth_dev->data->mac_addrs == NULL) {
372 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
373 		return -ENOMEM;
374 	}
375 
376 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
377 	if (err)
378 		return err;
379 
380 	/* Copy the permanent MAC address */
381 	if (hw->aq_fw_ops->get_mac_permanent(hw,
382 			eth_dev->data->mac_addrs->addr_bytes) != 0)
383 		return -EINVAL;
384 
385 	/* Reset the hw statistics */
386 	atl_dev_stats_reset(eth_dev);
387 
388 	rte_intr_callback_register(intr_handle,
389 				   atl_dev_interrupt_handler, eth_dev);
390 
391 	/* enable uio/vfio intr/eventfd mapping */
392 	rte_intr_enable(intr_handle);
393 
394 	/* enable support intr */
395 	atl_enable_intr(eth_dev);
396 
397 	return err;
398 }
399 
/*
 * Per-port uninit callback (PCI remove path and atl_dev_reset()).
 *
 * Only the primary process may tear down the port. Closes the device if
 * it is still running, detaches the burst/ops pointers, unregisters the
 * interrupt handler and frees the MAC address array.
 *
 * Returns 0 on success, -EPERM when called from a secondary process.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Stop HW and queues first so no traffic/IRQs race the teardown. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
431 
432 static int
433 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
434 	struct rte_pci_device *pci_dev)
435 {
436 	return rte_eth_dev_pci_generic_probe(pci_dev,
437 		sizeof(struct atl_adapter), eth_atl_dev_init);
438 }
439 
440 static int
441 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
442 {
443 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
444 }
445 
446 static int
447 atl_dev_configure(struct rte_eth_dev *dev)
448 {
449 	struct atl_interrupt *intr =
450 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
451 
452 	PMD_INIT_FUNC_TRACE();
453 
454 	/* set flag to update link status after init */
455 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
456 
457 	return 0;
458 }
459 
460 /*
461  * Configure device link speed and setup link.
462  * It returns 0 on success.
463  */
464 static int
465 atl_dev_start(struct rte_eth_dev *dev)
466 {
467 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
468 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
469 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
470 	uint32_t intr_vector = 0;
471 	int status;
472 	int err;
473 
474 	PMD_INIT_FUNC_TRACE();
475 
476 	/* set adapter started */
477 	hw->adapter_stopped = 0;
478 
479 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
480 		PMD_INIT_LOG(ERR,
481 		"Invalid link_speeds for port %u, fix speed not supported",
482 				dev->data->port_id);
483 		return -EINVAL;
484 	}
485 
486 	/* disable uio/vfio intr/eventfd mapping */
487 	rte_intr_disable(intr_handle);
488 
489 	/* reinitialize adapter
490 	 * this calls reset and start
491 	 */
492 	status = atl_reset_hw(hw);
493 	if (status != 0)
494 		return -EIO;
495 
496 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
497 
498 	hw_atl_b0_hw_start(hw);
499 	/* check and configure queue intr-vector mapping */
500 	if ((rte_intr_cap_multiple(intr_handle) ||
501 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
502 	    dev->data->dev_conf.intr_conf.rxq != 0) {
503 		intr_vector = dev->data->nb_rx_queues;
504 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
505 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
506 					ATL_MAX_INTR_QUEUE_NUM);
507 			return -ENOTSUP;
508 		}
509 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
510 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
511 			return -1;
512 		}
513 	}
514 
515 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
516 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
517 				    dev->data->nb_rx_queues * sizeof(int), 0);
518 		if (intr_handle->intr_vec == NULL) {
519 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
520 				     " intr_vec", dev->data->nb_rx_queues);
521 			return -ENOMEM;
522 		}
523 	}
524 
525 	/* initialize transmission unit */
526 	atl_tx_init(dev);
527 
528 	/* This can fail when allocating mbufs for descriptor rings */
529 	err = atl_rx_init(dev);
530 	if (err) {
531 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
532 		goto error;
533 	}
534 
535 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
536 		hw->fw_ver_actual >> 24,
537 		(hw->fw_ver_actual >> 16) & 0xFF,
538 		hw->fw_ver_actual & 0xFFFF);
539 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
540 
541 	err = atl_start_queues(dev);
542 	if (err < 0) {
543 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
544 		goto error;
545 	}
546 
547 	err = atl_dev_set_link_up(dev);
548 
549 	err = hw->aq_fw_ops->update_link_status(hw);
550 
551 	if (err)
552 		goto error;
553 
554 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
555 
556 	if (err)
557 		goto error;
558 
559 	if (rte_intr_allow_others(intr_handle)) {
560 		/* check if lsc interrupt is enabled */
561 		if (dev->data->dev_conf.intr_conf.lsc != 0)
562 			atl_dev_lsc_interrupt_setup(dev, true);
563 		else
564 			atl_dev_lsc_interrupt_setup(dev, false);
565 	} else {
566 		rte_intr_callback_unregister(intr_handle,
567 					     atl_dev_interrupt_handler, dev);
568 		if (dev->data->dev_conf.intr_conf.lsc != 0)
569 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
570 				     " no intr multiplex");
571 	}
572 
573 	/* check if rxq interrupt is enabled */
574 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
575 	    rte_intr_dp_is_en(intr_handle))
576 		atl_dev_rxq_interrupt_setup(dev);
577 
578 	/* enable uio/vfio intr/eventfd mapping */
579 	rte_intr_enable(intr_handle);
580 
581 	/* resume enabled intr since hw reset */
582 	atl_enable_intr(dev);
583 
584 	return 0;
585 
586 error:
587 	atl_stop_queues(dev);
588 	return -EIO;
589 }
590 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* atl_dev_start() unregisters the handler when interrupt
	 * multiplexing is unavailable; restore it here so a later start
	 * finds it registered again.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
635 
636 /*
637  * Set device link up: enable tx.
638  */
639 static int
640 atl_dev_set_link_up(struct rte_eth_dev *dev)
641 {
642 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
643 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
644 	uint32_t speed_mask = 0;
645 
646 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
647 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
648 	} else {
649 		if (link_speeds & ETH_LINK_SPEED_10G)
650 			speed_mask |= AQ_NIC_RATE_10G;
651 		if (link_speeds & ETH_LINK_SPEED_5G)
652 			speed_mask |= AQ_NIC_RATE_5G;
653 		if (link_speeds & ETH_LINK_SPEED_1G)
654 			speed_mask |= AQ_NIC_RATE_1G;
655 		if (link_speeds & ETH_LINK_SPEED_2_5G)
656 			speed_mask |=  AQ_NIC_RATE_2G5;
657 		if (link_speeds & ETH_LINK_SPEED_100M)
658 			speed_mask |= AQ_NIC_RATE_100M;
659 	}
660 
661 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
662 }
663 
/*
 * Set device link down: request an empty rate mask from firmware,
 * which takes the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
674 
/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	/* Stop traffic and reset the NIC, then release the queue memory. */
	atl_dev_stop(dev);

	atl_free_queues(dev);
}
687 
/* dev_reset callback: full uninit followed by re-init of the port.
 * Returns 0 on success or the first failing step's error code.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err = eth_atl_dev_uninit(dev);

	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
701 
702 
/* stats_get callback: refresh counters from firmware and fill the
 * generic rte_eth_stats from the FW snapshot plus the driver's
 * software-maintained per-queue totals. Always returns 0.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* Pull fresh counters into hw->curr_stats. */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are tracked in software by the rx/tx paths. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
734 
735 static void
736 atl_dev_stats_reset(struct rte_eth_dev *dev)
737 {
738 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
739 	struct aq_hw_s *hw = &adapter->hw;
740 
741 	hw->aq_fw_ops->update_stats(hw);
742 
743 	/* Reset software totals */
744 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
745 
746 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
747 }
748 
749 static int
750 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
751 			 struct rte_eth_xstat_name *xstats_names,
752 			 unsigned int size)
753 {
754 	unsigned int i;
755 
756 	if (!xstats_names)
757 		return RTE_DIM(atl_xstats_tbl);
758 
759 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
760 		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
761 			atl_xstats_tbl[i].name);
762 
763 	return i;
764 }
765 
/* xstats_get callback: copy up to `n` extended statistics out of the
 * FW stats snapshot using the offsets in atl_xstats_tbl. Returns the
 * number of entries written (0 when `stats` is NULL).
 */
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i;

	if (!stats)
		return 0;

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;
		/* Each table entry stores the stat's byte offset inside
		 * hw->curr_stats; read it as a 64-bit counter.
		 */
		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					atl_xstats_tbl[i].offset);
	}

	return i;
}
785 
786 static int
787 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
788 {
789 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
790 	uint32_t fw_ver = 0;
791 	unsigned int ret = 0;
792 
793 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
794 	if (ret)
795 		return -EIO;
796 
797 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
798 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
799 
800 	ret += 1; /* add string null-terminator */
801 
802 	if (fw_size < ret)
803 		return ret;
804 
805 	return 0;
806 }
807 
808 static void
809 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
810 {
811 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
812 
813 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
814 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
815 
816 	dev_info->min_rx_bufsize = 1024;
817 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
818 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
819 	dev_info->max_vfs = pci_dev->max_vfs;
820 
821 	dev_info->max_hash_mac_addrs = 0;
822 	dev_info->max_vmdq_pools = 0;
823 	dev_info->vmdq_queue_num = 0;
824 
825 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
826 
827 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
828 
829 
830 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
831 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
832 	};
833 
834 	dev_info->default_txconf = (struct rte_eth_txconf) {
835 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
836 	};
837 
838 	dev_info->rx_desc_lim = rx_desc_lim;
839 	dev_info->tx_desc_lim = tx_desc_lim;
840 
841 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
842 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
843 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
844 
845 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
846 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
847 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
848 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
849 }
850 
851 static const uint32_t *
852 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
853 {
854 	static const uint32_t ptypes[] = {
855 		RTE_PTYPE_L2_ETHER,
856 		RTE_PTYPE_L2_ETHER_ARP,
857 		RTE_PTYPE_L2_ETHER_VLAN,
858 		RTE_PTYPE_L3_IPV4,
859 		RTE_PTYPE_L3_IPV6,
860 		RTE_PTYPE_L4_TCP,
861 		RTE_PTYPE_L4_UDP,
862 		RTE_PTYPE_L4_SCTP,
863 		RTE_PTYPE_L4_ICMP,
864 		RTE_PTYPE_UNKNOWN
865 	};
866 
867 	if (dev->rx_pkt_burst == atl_recv_pkts)
868 		return ptypes;
869 
870 	return NULL;
871 }
872 
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* Start from a "link down" template; only speed/status change
	 * below if firmware reports an active link.
	 */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	return 0;
}
929 
930 static void
931 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
932 {
933 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
934 
935 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
936 }
937 
938 static void
939 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
940 {
941 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942 
943 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
944 }
945 
946 static void
947 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
948 {
949 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
950 
951 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
952 }
953 
954 static void
955 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
956 {
957 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
958 
959 	if (dev->data->promiscuous == 1)
960 		return; /* must remain in all_multicast mode */
961 
962 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
963 }
964 
/**
 * LSC interrupt setup hook called from atl_dev_start().
 *
 * NOTE(review): despite the `on` parameter, this implementation only
 * prints the current link status; it does not program any interrupt
 * state. Cause unmasking happens later via atl_enable_intr().
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable (currently unused).
 *
 * @return
 *  Always zero.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}
985 
/* RX-queue interrupt setup hook: nothing to program here — queue
 * interrupt mapping is handled elsewhere. Always returns 0.
 */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
991 
992 
993 static int
994 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
995 {
996 	struct atl_interrupt *intr =
997 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
998 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
999 	u64 cause = 0;
1000 
1001 	hw_atl_b0_hw_irq_read(hw, &cause);
1002 
1003 	atl_disable_intr(hw);
1004 	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
1005 			ATL_FLAG_NEED_LINK_UPDATE : 0;
1006 
1007 	return 0;
1008 }
1009 
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}


/* Debug builds additionally log the port's PCI address. */
#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}
1053 
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * Services the causes latched by atl_dev_interrupt_get_status(): if a
 * link change was flagged, refreshes the cached link state, logs it and
 * notifies registered LSC callbacks, then re-enables interrupts.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param intr_handle
 *  Host interrupt handle to re-enable after servicing.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
		/* Refresh the cached link state before clearing the flag
		 * and informing RTE_ETH_EVENT_INTR_LSC subscribers.
		 */
		atl_dev_link_update(dev, 0);
		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	/* Interrupts were masked while latching the causes; unmask at both
	 * the device level and the host interrupt controller level.
	 */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
1084 
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	/* Latch pending causes (this masks interrupts), then service them
	 * and re-enable interrupts.
	 */
	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}
1105 
/* Report the module EEPROM size; this PMD exposes a fixed-size
 * (SFP_EEPROM_SIZE) EEPROM window regardless of the device instance.
 */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1111 
1112 static int
1113 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1114 {
1115 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1116 	int dev_addr = SMBUS_DEVICE_ID;
1117 
1118 	if (hw->aq_fw_ops->get_eeprom == NULL)
1119 		return -ENOTSUP;
1120 
1121 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1122 	    eeprom->data == NULL)
1123 		return -EINVAL;
1124 
1125 	if (eeprom->magic > 0x7F)
1126 		return -EINVAL;
1127 
1128 	if (eeprom->magic)
1129 		dev_addr = eeprom->magic;
1130 
1131 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1132 					 eeprom->length, eeprom->offset);
1133 }
1134 
1135 static int
1136 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1137 {
1138 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1139 	int dev_addr = SMBUS_DEVICE_ID;
1140 
1141 	if (hw->aq_fw_ops->set_eeprom == NULL)
1142 		return -ENOTSUP;
1143 
1144 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1145 	    eeprom->data == NULL)
1146 		return -EINVAL;
1147 
1148 	if (eeprom->magic > 0x7F)
1149 		return -EINVAL;
1150 
1151 	if (eeprom->magic)
1152 		dev_addr = eeprom->magic;
1153 
1154 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1155 					 eeprom->length, eeprom->offset);
1156 }
1157 
1158 static int
1159 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1160 {
1161 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1162 	u32 mif_id;
1163 	int err;
1164 
1165 	if (regs->data == NULL) {
1166 		regs->length = hw_atl_utils_hw_get_reg_length();
1167 		regs->width = sizeof(u32);
1168 		return 0;
1169 	}
1170 
1171 	/* Only full register dump is supported */
1172 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1173 		return -ENOTSUP;
1174 
1175 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1176 
1177 	/* Device version */
1178 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1179 	regs->version = mif_id & 0xFFU;
1180 
1181 	return err;
1182 }
1183 
1184 static int
1185 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1186 {
1187 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1188 	u32 fc = AQ_NIC_FC_OFF;
1189 
1190 	if (hw->aq_fw_ops->get_flow_control == NULL)
1191 		return -ENOTSUP;
1192 
1193 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1194 
1195 	if (fc == AQ_NIC_FC_OFF)
1196 		fc_conf->mode = RTE_FC_NONE;
1197 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1198 		fc_conf->mode = RTE_FC_FULL;
1199 	else if (fc & AQ_NIC_FC_RX)
1200 		fc_conf->mode = RTE_FC_RX_PAUSE;
1201 	else if (fc & AQ_NIC_FC_TX)
1202 		fc_conf->mode = RTE_FC_TX_PAUSE;
1203 	return 0;
1204 }
1205 
1206 static int
1207 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1208 {
1209 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1210 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1211 
1212 
1213 	if (hw->aq_fw_ops->set_flow_control == NULL)
1214 		return -ENOTSUP;
1215 
1216 	if (fc_conf->mode == RTE_FC_NONE)
1217 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1218 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1219 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1220 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1221 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1222 	else if (fc_conf->mode == RTE_FC_FULL)
1223 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1224 
1225 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1226 		return hw->aq_fw_ops->set_flow_control(hw);
1227 
1228 	return 0;
1229 }
1230 
1231 static int
1232 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1233 		    u8 *mac_addr, bool enable)
1234 {
1235 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1236 	unsigned int h = 0U;
1237 	unsigned int l = 0U;
1238 	int err;
1239 
1240 	if (mac_addr) {
1241 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1242 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1243 			(mac_addr[4] << 8) | mac_addr[5];
1244 	}
1245 
1246 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1247 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1248 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1249 
1250 	if (enable)
1251 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1252 
1253 	err = aq_hw_err_from_flags(hw);
1254 
1255 	return err;
1256 }
1257 
1258 static int
1259 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1260 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1261 {
1262 	if (is_zero_ether_addr(mac_addr)) {
1263 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1264 		return -EINVAL;
1265 	}
1266 
1267 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1268 }
1269 
/* Clear the address in the L2 unicast filter slot @index and leave the
 * slot disabled.
 */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1275 
/* Replace the primary station address (filter slot 0).
 *
 * Returns the result of programming the new address; the original
 * discarded atl_add_mac_addr()'s return value and always reported 0,
 * hiding programming failures from the caller.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1283 
1284 static int
1285 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1286 {
1287 	struct rte_eth_dev_info dev_info;
1288 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1289 
1290 	atl_dev_info_get(dev, &dev_info);
1291 
1292 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1293 		return -EINVAL;
1294 
1295 	/* update max frame size */
1296 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1297 
1298 	return 0;
1299 }
1300 
/*
 * Add (@on != 0) or remove (@on == 0) @vlan_id in the HW VLAN filter
 * table, keeping cfg->vlan_filter[] as the software mirror of the HW
 * state.  When the table ends up empty, VLAN promiscuous mode is enabled
 * so tagged traffic is not silently dropped.
 *
 * Returns 0 on success, -ENOMEM when no free filter slot is available.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing slot holding this VLAN ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the free slot: record the ID, set the filter action,
	 * load the VLAN ID and finally enable the filter.
	 */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1361 
1362 static int
1363 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1364 {
1365 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366 	struct aq_hw_cfg_s *cfg =
1367 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1368 	int i;
1369 
1370 	PMD_INIT_FUNC_TRACE();
1371 
1372 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1373 		if (cfg->vlan_filter[i])
1374 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1375 	}
1376 	return 0;
1377 }
1378 
1379 static int
1380 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1381 {
1382 	struct aq_hw_cfg_s *cfg =
1383 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1384 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1385 	int ret = 0;
1386 	int i;
1387 
1388 	PMD_INIT_FUNC_TRACE();
1389 
1390 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1391 
1392 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1393 
1394 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1395 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1396 
1397 	if (mask & ETH_VLAN_EXTEND_MASK)
1398 		ret = -ENOTSUP;
1399 
1400 	return ret;
1401 }
1402 
1403 static int
1404 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1405 		  uint16_t tpid)
1406 {
1407 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1408 	int err = 0;
1409 
1410 	PMD_INIT_FUNC_TRACE();
1411 
1412 	switch (vlan_type) {
1413 	case ETH_VLAN_TYPE_INNER:
1414 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1415 		break;
1416 	case ETH_VLAN_TYPE_OUTER:
1417 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1418 		break;
1419 	default:
1420 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1421 		err = -ENOTSUP;
1422 	}
1423 
1424 	return err;
1425 }
1426 
1427 static void
1428 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1429 {
1430 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431 
1432 	PMD_INIT_FUNC_TRACE();
1433 
1434 	if (queue_id > dev->data->nb_rx_queues) {
1435 		PMD_DRV_LOG(ERR, "Invalid queue id");
1436 		return;
1437 	}
1438 
1439 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1440 }
1441 
1442 static int
1443 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1444 			  struct ether_addr *mc_addr_set,
1445 			  uint32_t nb_mc_addr)
1446 {
1447 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1448 	u32 i;
1449 
1450 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1451 		return -EINVAL;
1452 
1453 	/* Update whole uc filters table */
1454 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1455 		u8 *mac_addr = NULL;
1456 		u32 l = 0, h = 0;
1457 
1458 		if (i < nb_mc_addr) {
1459 			mac_addr = mc_addr_set[i].addr_bytes;
1460 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1461 				(mac_addr[4] << 8) | mac_addr[5];
1462 			h = (mac_addr[0] << 8) | mac_addr[1];
1463 		}
1464 
1465 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1466 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1467 							HW_ATL_B0_MAC_MIN + i);
1468 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1469 							HW_ATL_B0_MAC_MIN + i);
1470 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1471 					   HW_ATL_B0_MAC_MIN + i);
1472 	}
1473 
1474 	return 0;
1475 }
1476 
1477 static int
1478 atl_reta_update(struct rte_eth_dev *dev,
1479 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1480 		   uint16_t reta_size)
1481 {
1482 	int i;
1483 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1484 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1485 
1486 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1487 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1488 					dev->data->nb_rx_queues - 1);
1489 
1490 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1491 	return 0;
1492 }
1493 
1494 static int
1495 atl_reta_query(struct rte_eth_dev *dev,
1496 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1497 		    uint16_t reta_size)
1498 {
1499 	int i;
1500 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1501 
1502 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1503 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1504 	reta_conf->mask = ~0U;
1505 	return 0;
1506 }
1507 
1508 static int
1509 atl_rss_hash_update(struct rte_eth_dev *dev,
1510 				 struct rte_eth_rss_conf *rss_conf)
1511 {
1512 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1513 	struct aq_hw_cfg_s *cfg =
1514 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1515 	static u8 def_rss_key[40] = {
1516 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1517 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1518 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1519 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1520 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1521 	};
1522 
1523 	cfg->is_rss = !!rss_conf->rss_hf;
1524 	if (rss_conf->rss_key) {
1525 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1526 		       rss_conf->rss_key_len);
1527 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1528 	} else {
1529 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1530 		       sizeof(def_rss_key));
1531 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1532 	}
1533 
1534 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1535 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1536 	return 0;
1537 }
1538 
1539 static int
1540 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1541 				 struct rte_eth_rss_conf *rss_conf)
1542 {
1543 	struct aq_hw_cfg_s *cfg =
1544 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1545 
1546 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1547 	if (rss_conf->rss_key) {
1548 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1549 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1550 		       rss_conf->rss_key_len);
1551 	}
1552 
1553 	return 0;
1554 }
1555 
/* Register the atlantic PMD with the EAL PCI bus, together with its PCI
 * ID table and the kernel modules it can bind against.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

/* Constructor: register the PMD's init and driver log types; when
 * registration succeeds, default both to NOTICE level.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1569 
1570