/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static int atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static int atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
		struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

static int atl_add_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure = atl_dev_configure,
	.dev_start = atl_dev_start,
	.dev_stop = atl_dev_stop,
	.dev_set_link_up = atl_dev_set_link_up,
	.dev_set_link_down = atl_dev_set_link_down,
	.dev_close = atl_dev_close,
	.dev_reset = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable = atl_dev_promiscuous_enable,
	.promiscuous_disable = atl_dev_promiscuous_disable,
	.allmulticast_enable = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update = atl_dev_link_update,

	.get_reg = atl_dev_get_regs,

	/* Stats */
	.stats_get = atl_dev_stats_get,
	.xstats_get = atl_dev_xstats_get,
	.xstats_get_names = atl_dev_xstats_get_names,
	.stats_reset = atl_dev_stats_reset,
	.xstats_reset = atl_dev_stats_reset,

	.fw_version_get = atl_fw_version_get,
	.dev_infos_get = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set = atl_vlan_filter_set,
	.vlan_offload_set = atl_vlan_offload_set,
	.vlan_tpid_set = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start = atl_rx_queue_start,
	.rx_queue_stop = atl_rx_queue_stop,
	.rx_queue_setup = atl_rx_queue_setup,
	.rx_queue_release = atl_rx_queue_release,

	.tx_queue_start = atl_tx_queue_start,
	.tx_queue_stop = atl_tx_queue_stop,
	.tx_queue_setup = atl_tx_queue_setup,
	.tx_queue_release = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length = atl_dev_get_eeprom_length,
	.get_eeprom = atl_dev_get_eeprom,
	.set_eeprom = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get = atl_flow_ctrl_get,
	.flow_ctrl_set = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add = atl_add_mac_addr,
	.mac_addr_remove = atl_remove_mac_addr,
	.mac_addr_set = atl_set_default_mac_addr,
	.set_mc_addr_list = atl_dev_set_mc_addr_list,
	.rxq_info_get = atl_rxq_info_get,
	.txq_info_get = atl_txq_info_get,

	.reta_update = atl_reta_update,
	.reta_query = atl_reta_query,
	.rss_hash_update = atl_rss_hash_update,
	.rss_hash_conf_get = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

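/*
 * Interrupt mask helpers: writing 0xffffffff to the lower word of the
 * ITR mask set/clear registers unmasks or masks every interrupt cause
 * at once.
 */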
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

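/*
 * Per-port init. Burst functions and dev_ops are wired up in every
 * process; the primary process additionally sets up the shared hardware
 * state: FW ops, permanent MAC address and the interrupt handler.
 */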
static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			AQ_NIC_RATE_5G |
			AQ_NIC_RATE_2G5 |
			AQ_NIC_RATE_1G |
			AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			"Invalid link_speeds for port %u, fix speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = atl_dev_stop(dev);

	atl_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, dev);

	pthread_mutex_destroy(&hw->mbox_mutex);

	return ret;
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = atl_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

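/*
 * Apply the cached MACsec configuration by sending a sequence of
 * firmware mailbox requests: global config, TX SC, RX SC, TX SA and
 * RX SA. Any non-zero result from the firmware aborts the sequence.
 */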
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	/* Only a single TX SC is supported, so its index is always 0 */
	msg_macsec.txsc.index = 0;
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	/* DA matching disabled; a mask of 0x3f would match the full DA */
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

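/*
 * The atl_macsec_* helpers below only record the requested MACsec
 * settings in the per-port config. The settings are pushed to the
 * firmware by atl_dev_configure_macsec(), which runs from a delayed
 * alarm scheduled on link update.
 */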
int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

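/*
 * Number of xstats actually exposed: MACsec counters are skipped when
 * the firmware does not advertise the MACsec capability bit.
 */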
static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
		    ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}

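/*
 * MSM counters come from the mirrored hardware stats; MACsec counters
 * require a firmware mailbox round-trip and are left unset if that
 * request fails.
 */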
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}

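/*
 * Format the firmware version as "major.minor.build". If the caller's
 * buffer is too small, the required size (including the terminating
 * NUL) is returned instead of 0.
 */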
static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* account for the string's NUL terminator */

	if (fw_size < ret)
		return ret;

	return 0;
}

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    (int)(dev->data->port_id),
			    (unsigned int)link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			    "full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    (int)(dev->data->port_id));
	}

#ifdef DEBUG
	{
		struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			    pci_dev->addr.domain,
			    pci_dev->addr.bus,
			    pci_dev->addr.devid,
			    pci_dev->addr.function);
	}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%u", link.link_speed);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check macsec Keys expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

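/*
 * Program one unicast L2 filter slot. The MAC address is split into
 * the 16-bit MSW and 32-bit LSW expected by the filter registers; a
 * NULL mac_addr clears the slot, and the filter is re-enabled only
 * when 'enable' is set.
 */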
static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}

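/*
 * Add or remove one VLAN ID in the hardware filter table: an existing
 * entry is reused or cleared first, otherwise a new ID takes the first
 * free slot. VLAN promiscuous mode is enabled whenever the table ends
 * up empty so that traffic keeps flowing without filters.
 */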
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* valid queue ids are 0..nb_rx_queues-1 */
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

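/*
 * RSS redirection table updates: each entry is clamped to the number
 * of configured RX queues before the table is written to hardware.
 */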
static int
atl_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
	       struct rte_eth_rss_reta_entry64 *reta_conf,
	       uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);