1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606 * Copyright(c) 2014-2018 Chelsio Communications.
3a9643ea8Slogwang * All rights reserved.
4a9643ea8Slogwang */
5a9643ea8Slogwang
6a9643ea8Slogwang #include <sys/queue.h>
7a9643ea8Slogwang #include <stdio.h>
8a9643ea8Slogwang #include <errno.h>
9a9643ea8Slogwang #include <stdint.h>
10a9643ea8Slogwang #include <string.h>
11a9643ea8Slogwang #include <unistd.h>
12a9643ea8Slogwang #include <stdarg.h>
13a9643ea8Slogwang #include <inttypes.h>
14a9643ea8Slogwang #include <netinet/in.h>
15a9643ea8Slogwang
16a9643ea8Slogwang #include <rte_byteorder.h>
17a9643ea8Slogwang #include <rte_common.h>
18a9643ea8Slogwang #include <rte_cycles.h>
19a9643ea8Slogwang #include <rte_interrupts.h>
20a9643ea8Slogwang #include <rte_log.h>
21a9643ea8Slogwang #include <rte_debug.h>
22a9643ea8Slogwang #include <rte_pci.h>
232bfe3f2eSlogwang #include <rte_bus_pci.h>
24a9643ea8Slogwang #include <rte_atomic.h>
25a9643ea8Slogwang #include <rte_branch_prediction.h>
26a9643ea8Slogwang #include <rte_memory.h>
27a9643ea8Slogwang #include <rte_tailq.h>
28a9643ea8Slogwang #include <rte_eal.h>
29a9643ea8Slogwang #include <rte_alarm.h>
30a9643ea8Slogwang #include <rte_ether.h>
31d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
322bfe3f2eSlogwang #include <rte_ethdev_pci.h>
33a9643ea8Slogwang #include <rte_malloc.h>
34a9643ea8Slogwang #include <rte_random.h>
35a9643ea8Slogwang #include <rte_dev.h>
36a9643ea8Slogwang
37a9643ea8Slogwang #include "cxgbe.h"
38d30ea906Sjfb8856606 #include "cxgbe_pfvf.h"
39d30ea906Sjfb8856606 #include "cxgbe_flow.h"
40a9643ea8Slogwang
41a9643ea8Slogwang /*
42a9643ea8Slogwang * Macros needed to support the PCI Device ID Table ...
43a9643ea8Slogwang */
44a9643ea8Slogwang #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
452bfe3f2eSlogwang static const struct rte_pci_id cxgb4_pci_tbl[] = {
46a9643ea8Slogwang #define CH_PCI_DEVICE_ID_FUNCTION 0x4
47a9643ea8Slogwang
48a9643ea8Slogwang #define PCI_VENDOR_ID_CHELSIO 0x1425
49a9643ea8Slogwang
50a9643ea8Slogwang #define CH_PCI_ID_TABLE_ENTRY(devid) \
51a9643ea8Slogwang { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
52a9643ea8Slogwang
53a9643ea8Slogwang #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
54a9643ea8Slogwang { .vendor_id = 0, } \
55a9643ea8Slogwang }
56a9643ea8Slogwang
57a9643ea8Slogwang /*
58a9643ea8Slogwang *... and the PCI ID Table itself ...
59a9643ea8Slogwang */
604418919fSjohnjiang #include "base/t4_pci_id_tbl.h"
61a9643ea8Slogwang
cxgbe_xmit_pkts(void * tx_queue,struct rte_mbuf ** tx_pkts,uint16_t nb_pkts)62d30ea906Sjfb8856606 uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
63a9643ea8Slogwang uint16_t nb_pkts)
64a9643ea8Slogwang {
65a9643ea8Slogwang struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
66a9643ea8Slogwang uint16_t pkts_sent, pkts_remain;
67a9643ea8Slogwang uint16_t total_sent = 0;
684418919fSjohnjiang uint16_t idx = 0;
69a9643ea8Slogwang int ret = 0;
70a9643ea8Slogwang
71a9643ea8Slogwang t4_os_lock(&txq->txq_lock);
72a9643ea8Slogwang /* free up desc from already completed tx */
73a9643ea8Slogwang reclaim_completed_tx(&txq->q);
740c6bd470Sfengbojiang if (unlikely(!nb_pkts))
750c6bd470Sfengbojiang goto out_unlock;
760c6bd470Sfengbojiang
774418919fSjohnjiang rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
78a9643ea8Slogwang while (total_sent < nb_pkts) {
79a9643ea8Slogwang pkts_remain = nb_pkts - total_sent;
80a9643ea8Slogwang
81a9643ea8Slogwang for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
824418919fSjohnjiang idx = total_sent + pkts_sent;
834418919fSjohnjiang if ((idx + 1) < nb_pkts)
844418919fSjohnjiang rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
854418919fSjohnjiang volatile void *));
864418919fSjohnjiang ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
87a9643ea8Slogwang if (ret < 0)
88a9643ea8Slogwang break;
89a9643ea8Slogwang }
90a9643ea8Slogwang if (!pkts_sent)
91a9643ea8Slogwang break;
92a9643ea8Slogwang total_sent += pkts_sent;
93a9643ea8Slogwang /* reclaim as much as possible */
94a9643ea8Slogwang reclaim_completed_tx(&txq->q);
95a9643ea8Slogwang }
96a9643ea8Slogwang
970c6bd470Sfengbojiang out_unlock:
98a9643ea8Slogwang t4_os_unlock(&txq->txq_lock);
99a9643ea8Slogwang return total_sent;
100a9643ea8Slogwang }
101a9643ea8Slogwang
cxgbe_recv_pkts(void * rx_queue,struct rte_mbuf ** rx_pkts,uint16_t nb_pkts)102d30ea906Sjfb8856606 uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
103a9643ea8Slogwang uint16_t nb_pkts)
104a9643ea8Slogwang {
105a9643ea8Slogwang struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
106a9643ea8Slogwang unsigned int work_done;
107a9643ea8Slogwang
108a9643ea8Slogwang if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
109a9643ea8Slogwang dev_err(adapter, "error in cxgbe poll\n");
110a9643ea8Slogwang
111a9643ea8Slogwang return work_done;
112a9643ea8Slogwang }
113a9643ea8Slogwang
cxgbe_dev_info_get(struct rte_eth_dev * eth_dev,struct rte_eth_dev_info * device_info)1144418919fSjohnjiang int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
115a9643ea8Slogwang struct rte_eth_dev_info *device_info)
116a9643ea8Slogwang {
1174b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
118a9643ea8Slogwang struct adapter *adapter = pi->adapter;
119a9643ea8Slogwang
120a9643ea8Slogwang static const struct rte_eth_desc_lim cxgbe_desc_lim = {
121a9643ea8Slogwang .nb_max = CXGBE_MAX_RING_DESC_SIZE,
122a9643ea8Slogwang .nb_min = CXGBE_MIN_RING_DESC_SIZE,
123a9643ea8Slogwang .nb_align = 1,
124a9643ea8Slogwang };
125a9643ea8Slogwang
126a9643ea8Slogwang device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
127a9643ea8Slogwang device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
128*2d9fd380Sjfb8856606 device_info->max_rx_queues = adapter->sge.max_ethqsets;
129*2d9fd380Sjfb8856606 device_info->max_tx_queues = adapter->sge.max_ethqsets;
130a9643ea8Slogwang device_info->max_mac_addrs = 1;
131a9643ea8Slogwang /* XXX: For now we support one MAC/port */
132a9643ea8Slogwang device_info->max_vfs = adapter->params.arch.vfcount;
133a9643ea8Slogwang device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
134a9643ea8Slogwang
135d30ea906Sjfb8856606 device_info->rx_queue_offload_capa = 0UL;
136d30ea906Sjfb8856606 device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
137a9643ea8Slogwang
138d30ea906Sjfb8856606 device_info->tx_queue_offload_capa = 0UL;
139d30ea906Sjfb8856606 device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
140a9643ea8Slogwang
141a9643ea8Slogwang device_info->reta_size = pi->rss_size;
142d30ea906Sjfb8856606 device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
143d30ea906Sjfb8856606 device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
144a9643ea8Slogwang
145a9643ea8Slogwang device_info->rx_desc_lim = cxgbe_desc_lim;
146a9643ea8Slogwang device_info->tx_desc_lim = cxgbe_desc_lim;
1472bfe3f2eSlogwang cxgbe_get_speed_caps(pi, &device_info->speed_capa);
1484418919fSjohnjiang
1494418919fSjohnjiang return 0;
150a9643ea8Slogwang }
151a9643ea8Slogwang
cxgbe_dev_promiscuous_enable(struct rte_eth_dev * eth_dev)1524418919fSjohnjiang int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
153a9643ea8Slogwang {
1544b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
155a9643ea8Slogwang struct adapter *adapter = pi->adapter;
156a9643ea8Slogwang
1574418919fSjohnjiang return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
158a9643ea8Slogwang 1, -1, 1, -1, false);
159a9643ea8Slogwang }
160a9643ea8Slogwang
cxgbe_dev_promiscuous_disable(struct rte_eth_dev * eth_dev)1614418919fSjohnjiang int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
162a9643ea8Slogwang {
1634b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
164a9643ea8Slogwang struct adapter *adapter = pi->adapter;
165a9643ea8Slogwang
1664418919fSjohnjiang return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
167a9643ea8Slogwang 0, -1, 1, -1, false);
168a9643ea8Slogwang }
169a9643ea8Slogwang
cxgbe_dev_allmulticast_enable(struct rte_eth_dev * eth_dev)1704418919fSjohnjiang int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
171a9643ea8Slogwang {
1724b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
173a9643ea8Slogwang struct adapter *adapter = pi->adapter;
174a9643ea8Slogwang
175a9643ea8Slogwang /* TODO: address filters ?? */
176a9643ea8Slogwang
1774418919fSjohnjiang return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
178a9643ea8Slogwang -1, 1, 1, -1, false);
179a9643ea8Slogwang }
180a9643ea8Slogwang
cxgbe_dev_allmulticast_disable(struct rte_eth_dev * eth_dev)1814418919fSjohnjiang int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
182a9643ea8Slogwang {
1834b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
184a9643ea8Slogwang struct adapter *adapter = pi->adapter;
185a9643ea8Slogwang
186a9643ea8Slogwang /* TODO: address filters ?? */
187a9643ea8Slogwang
1884418919fSjohnjiang return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
189a9643ea8Slogwang -1, 0, 1, -1, false);
190a9643ea8Slogwang }
191a9643ea8Slogwang
cxgbe_dev_link_update(struct rte_eth_dev * eth_dev,int wait_to_complete)192d30ea906Sjfb8856606 int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
193d30ea906Sjfb8856606 int wait_to_complete)
194a9643ea8Slogwang {
1954b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
196a9643ea8Slogwang struct adapter *adapter = pi->adapter;
197a9643ea8Slogwang struct sge *s = &adapter->sge;
198d30ea906Sjfb8856606 struct rte_eth_link new_link = { 0 };
199d30ea906Sjfb8856606 unsigned int i, work_done, budget = 32;
200d30ea906Sjfb8856606 u8 old_link = pi->link_cfg.link_ok;
201a9643ea8Slogwang
202d30ea906Sjfb8856606 for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
2034418919fSjohnjiang if (!s->fw_evtq.desc)
2044418919fSjohnjiang break;
2054418919fSjohnjiang
206a9643ea8Slogwang cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
207a9643ea8Slogwang
208d30ea906Sjfb8856606 /* Exit if link status changed or always forced up */
2091646932aSjfb8856606 if (pi->link_cfg.link_ok != old_link ||
2101646932aSjfb8856606 cxgbe_force_linkup(adapter))
211d30ea906Sjfb8856606 break;
212a9643ea8Slogwang
213d30ea906Sjfb8856606 if (!wait_to_complete)
214d30ea906Sjfb8856606 break;
215d30ea906Sjfb8856606
216d30ea906Sjfb8856606 rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
217d30ea906Sjfb8856606 }
218d30ea906Sjfb8856606
2191646932aSjfb8856606 new_link.link_status = cxgbe_force_linkup(adapter) ?
220d30ea906Sjfb8856606 ETH_LINK_UP : pi->link_cfg.link_ok;
221d30ea906Sjfb8856606 new_link.link_autoneg = pi->link_cfg.autoneg;
222d30ea906Sjfb8856606 new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
223d30ea906Sjfb8856606 new_link.link_speed = pi->link_cfg.speed;
224d30ea906Sjfb8856606
225d30ea906Sjfb8856606 return rte_eth_linkstatus_set(eth_dev, &new_link);
226d30ea906Sjfb8856606 }
227d30ea906Sjfb8856606
228d30ea906Sjfb8856606 /**
229d30ea906Sjfb8856606 * Set device link up.
230d30ea906Sjfb8856606 */
cxgbe_dev_set_link_up(struct rte_eth_dev * dev)231d30ea906Sjfb8856606 int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
232d30ea906Sjfb8856606 {
2334b05018fSfengbojiang struct port_info *pi = dev->data->dev_private;
234d30ea906Sjfb8856606 struct adapter *adapter = pi->adapter;
235d30ea906Sjfb8856606 unsigned int work_done, budget = 32;
236d30ea906Sjfb8856606 struct sge *s = &adapter->sge;
237d30ea906Sjfb8856606 int ret;
238d30ea906Sjfb8856606
2394418919fSjohnjiang if (!s->fw_evtq.desc)
2404418919fSjohnjiang return -ENOMEM;
2414418919fSjohnjiang
242d30ea906Sjfb8856606 /* Flush all link events */
243d30ea906Sjfb8856606 cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
244d30ea906Sjfb8856606
245d30ea906Sjfb8856606 /* If link already up, nothing to do */
246d30ea906Sjfb8856606 if (pi->link_cfg.link_ok)
247d30ea906Sjfb8856606 return 0;
248d30ea906Sjfb8856606
249d30ea906Sjfb8856606 ret = cxgbe_set_link_status(pi, true);
250d30ea906Sjfb8856606 if (ret)
251d30ea906Sjfb8856606 return ret;
252d30ea906Sjfb8856606
253d30ea906Sjfb8856606 cxgbe_dev_link_update(dev, 1);
254a9643ea8Slogwang return 0;
255a9643ea8Slogwang }
256a9643ea8Slogwang
257d30ea906Sjfb8856606 /**
258d30ea906Sjfb8856606 * Set device link down.
259d30ea906Sjfb8856606 */
cxgbe_dev_set_link_down(struct rte_eth_dev * dev)260d30ea906Sjfb8856606 int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
261d30ea906Sjfb8856606 {
2624b05018fSfengbojiang struct port_info *pi = dev->data->dev_private;
263d30ea906Sjfb8856606 struct adapter *adapter = pi->adapter;
264d30ea906Sjfb8856606 unsigned int work_done, budget = 32;
265d30ea906Sjfb8856606 struct sge *s = &adapter->sge;
266d30ea906Sjfb8856606 int ret;
267d30ea906Sjfb8856606
2684418919fSjohnjiang if (!s->fw_evtq.desc)
2694418919fSjohnjiang return -ENOMEM;
2704418919fSjohnjiang
271d30ea906Sjfb8856606 /* Flush all link events */
272d30ea906Sjfb8856606 cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
273d30ea906Sjfb8856606
274d30ea906Sjfb8856606 /* If link already down, nothing to do */
275d30ea906Sjfb8856606 if (!pi->link_cfg.link_ok)
276d30ea906Sjfb8856606 return 0;
277d30ea906Sjfb8856606
278d30ea906Sjfb8856606 ret = cxgbe_set_link_status(pi, false);
279d30ea906Sjfb8856606 if (ret)
280d30ea906Sjfb8856606 return ret;
281d30ea906Sjfb8856606
282d30ea906Sjfb8856606 cxgbe_dev_link_update(dev, 0);
283d30ea906Sjfb8856606 return 0;
284d30ea906Sjfb8856606 }
285d30ea906Sjfb8856606
cxgbe_dev_mtu_set(struct rte_eth_dev * eth_dev,uint16_t mtu)286d30ea906Sjfb8856606 int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
287a9643ea8Slogwang {
2884b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
289a9643ea8Slogwang struct adapter *adapter = pi->adapter;
290a9643ea8Slogwang struct rte_eth_dev_info dev_info;
291a9643ea8Slogwang int err;
2924418919fSjohnjiang uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
293a9643ea8Slogwang
2944418919fSjohnjiang err = cxgbe_dev_info_get(eth_dev, &dev_info);
2954418919fSjohnjiang if (err != 0)
2964418919fSjohnjiang return err;
297a9643ea8Slogwang
2984418919fSjohnjiang /* Must accommodate at least RTE_ETHER_MIN_MTU */
2994418919fSjohnjiang if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
300a9643ea8Slogwang return -EINVAL;
301a9643ea8Slogwang
302a9643ea8Slogwang /* set to jumbo mode if needed */
3034418919fSjohnjiang if (new_mtu > RTE_ETHER_MAX_LEN)
304d30ea906Sjfb8856606 eth_dev->data->dev_conf.rxmode.offloads |=
305d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME;
306a9643ea8Slogwang else
307d30ea906Sjfb8856606 eth_dev->data->dev_conf.rxmode.offloads &=
308d30ea906Sjfb8856606 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
309a9643ea8Slogwang
310a9643ea8Slogwang err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
311a9643ea8Slogwang -1, -1, true);
312a9643ea8Slogwang if (!err)
313a9643ea8Slogwang eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
314a9643ea8Slogwang
315a9643ea8Slogwang return err;
316a9643ea8Slogwang }
317a9643ea8Slogwang
318a9643ea8Slogwang /*
319a9643ea8Slogwang * Stop device.
320a9643ea8Slogwang */
cxgbe_dev_close(struct rte_eth_dev * eth_dev)321*2d9fd380Sjfb8856606 int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
322a9643ea8Slogwang {
323*2d9fd380Sjfb8856606 struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
324a9643ea8Slogwang struct adapter *adapter = pi->adapter;
325*2d9fd380Sjfb8856606 u8 i;
326a9643ea8Slogwang
327a9643ea8Slogwang CXGBE_FUNC_TRACE();
328a9643ea8Slogwang
329*2d9fd380Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
330*2d9fd380Sjfb8856606 return 0;
331*2d9fd380Sjfb8856606
332a9643ea8Slogwang if (!(adapter->flags & FULL_INIT_DONE))
333*2d9fd380Sjfb8856606 return 0;
334*2d9fd380Sjfb8856606
335*2d9fd380Sjfb8856606 if (!pi->viid)
336*2d9fd380Sjfb8856606 return 0;
337a9643ea8Slogwang
338a9643ea8Slogwang cxgbe_down(pi);
3390c6bd470Sfengbojiang t4_sge_eth_release_queues(pi);
340*2d9fd380Sjfb8856606 t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
341*2d9fd380Sjfb8856606 pi->viid = 0;
342*2d9fd380Sjfb8856606
343*2d9fd380Sjfb8856606 /* Free up the adapter-wide resources only after all the ports
344*2d9fd380Sjfb8856606 * under this PF have been closed.
345*2d9fd380Sjfb8856606 */
346*2d9fd380Sjfb8856606 for_each_port(adapter, i) {
347*2d9fd380Sjfb8856606 temp_pi = adap2pinfo(adapter, i);
348*2d9fd380Sjfb8856606 if (temp_pi->viid)
349*2d9fd380Sjfb8856606 return 0;
350*2d9fd380Sjfb8856606 }
351*2d9fd380Sjfb8856606
352*2d9fd380Sjfb8856606 cxgbe_close(adapter);
353*2d9fd380Sjfb8856606 rte_free(adapter);
354*2d9fd380Sjfb8856606
355*2d9fd380Sjfb8856606 return 0;
356a9643ea8Slogwang }
357a9643ea8Slogwang
358a9643ea8Slogwang /* Start the device.
359a9643ea8Slogwang * It returns 0 on success.
360a9643ea8Slogwang */
cxgbe_dev_start(struct rte_eth_dev * eth_dev)361d30ea906Sjfb8856606 int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
362a9643ea8Slogwang {
3634b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
364d30ea906Sjfb8856606 struct rte_eth_rxmode *rx_conf = ð_dev->data->dev_conf.rxmode;
365a9643ea8Slogwang struct adapter *adapter = pi->adapter;
366a9643ea8Slogwang int err = 0, i;
367a9643ea8Slogwang
368a9643ea8Slogwang CXGBE_FUNC_TRACE();
369a9643ea8Slogwang
370a9643ea8Slogwang /*
371a9643ea8Slogwang * If we don't have a connection to the firmware there's nothing we
372a9643ea8Slogwang * can do.
373a9643ea8Slogwang */
374a9643ea8Slogwang if (!(adapter->flags & FW_OK)) {
375a9643ea8Slogwang err = -ENXIO;
376a9643ea8Slogwang goto out;
377a9643ea8Slogwang }
378a9643ea8Slogwang
379a9643ea8Slogwang if (!(adapter->flags & FULL_INIT_DONE)) {
380a9643ea8Slogwang err = cxgbe_up(adapter);
381a9643ea8Slogwang if (err < 0)
382a9643ea8Slogwang goto out;
383a9643ea8Slogwang }
384a9643ea8Slogwang
385d30ea906Sjfb8856606 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
386d30ea906Sjfb8856606 eth_dev->data->scattered_rx = 1;
387d30ea906Sjfb8856606 else
388d30ea906Sjfb8856606 eth_dev->data->scattered_rx = 0;
389d30ea906Sjfb8856606
3902bfe3f2eSlogwang cxgbe_enable_rx_queues(pi);
3912bfe3f2eSlogwang
3921646932aSjfb8856606 err = cxgbe_setup_rss(pi);
393a9643ea8Slogwang if (err)
394a9643ea8Slogwang goto out;
395a9643ea8Slogwang
396a9643ea8Slogwang for (i = 0; i < pi->n_tx_qsets; i++) {
397a9643ea8Slogwang err = cxgbe_dev_tx_queue_start(eth_dev, i);
398a9643ea8Slogwang if (err)
399a9643ea8Slogwang goto out;
400a9643ea8Slogwang }
401a9643ea8Slogwang
402a9643ea8Slogwang for (i = 0; i < pi->n_rx_qsets; i++) {
403a9643ea8Slogwang err = cxgbe_dev_rx_queue_start(eth_dev, i);
404a9643ea8Slogwang if (err)
405a9643ea8Slogwang goto out;
406a9643ea8Slogwang }
407a9643ea8Slogwang
4081646932aSjfb8856606 err = cxgbe_link_start(pi);
409a9643ea8Slogwang if (err)
410a9643ea8Slogwang goto out;
411a9643ea8Slogwang
412a9643ea8Slogwang out:
413a9643ea8Slogwang return err;
414a9643ea8Slogwang }
415a9643ea8Slogwang
416a9643ea8Slogwang /*
417a9643ea8Slogwang * Stop device: disable rx and tx functions to allow for reconfiguring.
418a9643ea8Slogwang */
cxgbe_dev_stop(struct rte_eth_dev * eth_dev)419*2d9fd380Sjfb8856606 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
420a9643ea8Slogwang {
4214b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
422a9643ea8Slogwang struct adapter *adapter = pi->adapter;
423a9643ea8Slogwang
424a9643ea8Slogwang CXGBE_FUNC_TRACE();
425a9643ea8Slogwang
426a9643ea8Slogwang if (!(adapter->flags & FULL_INIT_DONE))
427*2d9fd380Sjfb8856606 return 0;
428a9643ea8Slogwang
429a9643ea8Slogwang cxgbe_down(pi);
430a9643ea8Slogwang
431a9643ea8Slogwang /*
432a9643ea8Slogwang * We clear queues only if both tx and rx path of the port
433a9643ea8Slogwang * have been disabled
434a9643ea8Slogwang */
435a9643ea8Slogwang t4_sge_eth_clear_queues(pi);
436d30ea906Sjfb8856606 eth_dev->data->scattered_rx = 0;
437*2d9fd380Sjfb8856606
438*2d9fd380Sjfb8856606 return 0;
439a9643ea8Slogwang }
440a9643ea8Slogwang
cxgbe_dev_configure(struct rte_eth_dev * eth_dev)441d30ea906Sjfb8856606 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
442a9643ea8Slogwang {
4434b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
444a9643ea8Slogwang struct adapter *adapter = pi->adapter;
445a9643ea8Slogwang int err;
446a9643ea8Slogwang
447a9643ea8Slogwang CXGBE_FUNC_TRACE();
448a9643ea8Slogwang
4494418919fSjohnjiang if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
4504418919fSjohnjiang eth_dev->data->dev_conf.rxmode.offloads |=
4514418919fSjohnjiang DEV_RX_OFFLOAD_RSS_HASH;
4524418919fSjohnjiang
453a9643ea8Slogwang if (!(adapter->flags & FW_QUEUE_BOUND)) {
4541646932aSjfb8856606 err = cxgbe_setup_sge_fwevtq(adapter);
455a9643ea8Slogwang if (err)
456a9643ea8Slogwang return err;
457a9643ea8Slogwang adapter->flags |= FW_QUEUE_BOUND;
458d30ea906Sjfb8856606 if (is_pf4(adapter)) {
4591646932aSjfb8856606 err = cxgbe_setup_sge_ctrl_txq(adapter);
460d30ea906Sjfb8856606 if (err)
461d30ea906Sjfb8856606 return err;
462d30ea906Sjfb8856606 }
463a9643ea8Slogwang }
464a9643ea8Slogwang
4651646932aSjfb8856606 err = cxgbe_cfg_queue_count(eth_dev);
466a9643ea8Slogwang if (err)
467a9643ea8Slogwang return err;
468a9643ea8Slogwang
469a9643ea8Slogwang return 0;
470a9643ea8Slogwang }
471a9643ea8Slogwang
cxgbe_dev_tx_queue_start(struct rte_eth_dev * eth_dev,uint16_t tx_queue_id)472d30ea906Sjfb8856606 int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
473a9643ea8Slogwang {
474a9643ea8Slogwang int ret;
475a9643ea8Slogwang struct sge_eth_txq *txq = (struct sge_eth_txq *)
476a9643ea8Slogwang (eth_dev->data->tx_queues[tx_queue_id]);
477a9643ea8Slogwang
478a9643ea8Slogwang dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
479a9643ea8Slogwang
480a9643ea8Slogwang ret = t4_sge_eth_txq_start(txq);
481a9643ea8Slogwang if (ret == 0)
482a9643ea8Slogwang eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
483a9643ea8Slogwang
484a9643ea8Slogwang return ret;
485a9643ea8Slogwang }
486a9643ea8Slogwang
cxgbe_dev_tx_queue_stop(struct rte_eth_dev * eth_dev,uint16_t tx_queue_id)487d30ea906Sjfb8856606 int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
488a9643ea8Slogwang {
489a9643ea8Slogwang int ret;
490a9643ea8Slogwang struct sge_eth_txq *txq = (struct sge_eth_txq *)
491a9643ea8Slogwang (eth_dev->data->tx_queues[tx_queue_id]);
492a9643ea8Slogwang
493a9643ea8Slogwang dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
494a9643ea8Slogwang
495a9643ea8Slogwang ret = t4_sge_eth_txq_stop(txq);
496a9643ea8Slogwang if (ret == 0)
497a9643ea8Slogwang eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
498a9643ea8Slogwang
499a9643ea8Slogwang return ret;
500a9643ea8Slogwang }
501a9643ea8Slogwang
/* Allocate and configure the Tx queue set bound to @queue_idx. */
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq;
	unsigned int ring_size;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/* Release any queue previously configured at this index. */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	ring_size = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		ring_size = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = ring_size;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}
553a9643ea8Slogwang
cxgbe_dev_tx_queue_release(void * q)554d30ea906Sjfb8856606 void cxgbe_dev_tx_queue_release(void *q)
555a9643ea8Slogwang {
556a9643ea8Slogwang struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
557a9643ea8Slogwang
558a9643ea8Slogwang if (txq) {
559a9643ea8Slogwang struct port_info *pi = (struct port_info *)
560a9643ea8Slogwang (txq->eth_dev->data->dev_private);
561a9643ea8Slogwang struct adapter *adap = pi->adapter;
562a9643ea8Slogwang
563a9643ea8Slogwang dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
564a9643ea8Slogwang __func__, pi->port_id, txq->q.cntxt_id);
565a9643ea8Slogwang
566a9643ea8Slogwang t4_sge_eth_txq_release(adap, txq);
567a9643ea8Slogwang }
568a9643ea8Slogwang }
569a9643ea8Slogwang
cxgbe_dev_rx_queue_start(struct rte_eth_dev * eth_dev,uint16_t rx_queue_id)570d30ea906Sjfb8856606 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
571a9643ea8Slogwang {
5724b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
573a9643ea8Slogwang struct adapter *adap = pi->adapter;
574*2d9fd380Sjfb8856606 struct sge_eth_rxq *rxq;
575*2d9fd380Sjfb8856606 int ret;
576a9643ea8Slogwang
577a9643ea8Slogwang dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
578a9643ea8Slogwang __func__, pi->port_id, rx_queue_id);
579a9643ea8Slogwang
580*2d9fd380Sjfb8856606 rxq = eth_dev->data->rx_queues[rx_queue_id];
581*2d9fd380Sjfb8856606 ret = t4_sge_eth_rxq_start(adap, rxq);
582a9643ea8Slogwang if (ret == 0)
583a9643ea8Slogwang eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
584a9643ea8Slogwang
585a9643ea8Slogwang return ret;
586a9643ea8Slogwang }
587a9643ea8Slogwang
cxgbe_dev_rx_queue_stop(struct rte_eth_dev * eth_dev,uint16_t rx_queue_id)588d30ea906Sjfb8856606 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
589a9643ea8Slogwang {
5904b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
591a9643ea8Slogwang struct adapter *adap = pi->adapter;
592*2d9fd380Sjfb8856606 struct sge_eth_rxq *rxq;
593*2d9fd380Sjfb8856606 int ret;
594a9643ea8Slogwang
595a9643ea8Slogwang dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
596a9643ea8Slogwang __func__, pi->port_id, rx_queue_id);
597a9643ea8Slogwang
598*2d9fd380Sjfb8856606 rxq = eth_dev->data->rx_queues[rx_queue_id];
599*2d9fd380Sjfb8856606 ret = t4_sge_eth_rxq_stop(adap, rxq);
600a9643ea8Slogwang if (ret == 0)
601a9643ea8Slogwang eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
602a9643ea8Slogwang
603a9643ea8Slogwang return ret;
604a9643ea8Slogwang }
605a9643ea8Slogwang
/*
 * Allocate and configure the Rx queue set (response queue + free list)
 * bound to @queue_idx.
 *
 * Validates the configured max packet length against the device limits,
 * clamps the descriptor count into the supported range, releases any queue
 * previously bound to this index, toggles jumbo-frame offload as needed,
 * and finally allocates the hardware queue via t4_sge_alloc_rxq().
 * Returns 0 on success or a negative errno.
 */
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0) {
		dev_err(adap, "%s: error during getting ethernet device info",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	/* The free list is embedded in the queue set, so size it
	 * unconditionally (the previous "(&rxq->fl) != NULL" guard was a
	 * tautology: the address of a struct member is never NULL).
	 */
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > RTE_ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}
690a9643ea8Slogwang
cxgbe_dev_rx_queue_release(void * q)691d30ea906Sjfb8856606 void cxgbe_dev_rx_queue_release(void *q)
692a9643ea8Slogwang {
693a9643ea8Slogwang struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
694a9643ea8Slogwang
695*2d9fd380Sjfb8856606 if (rxq) {
696a9643ea8Slogwang struct port_info *pi = (struct port_info *)
697*2d9fd380Sjfb8856606 (rxq->rspq.eth_dev->data->dev_private);
698a9643ea8Slogwang struct adapter *adap = pi->adapter;
699a9643ea8Slogwang
700a9643ea8Slogwang dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
701a9643ea8Slogwang __func__, pi->port_id, rxq->rspq.cntxt_id);
702a9643ea8Slogwang
703a9643ea8Slogwang t4_sge_eth_rxq_release(adap, rxq);
704a9643ea8Slogwang }
705a9643ea8Slogwang }
706a9643ea8Slogwang
707a9643ea8Slogwang /*
708a9643ea8Slogwang * Get port statistics.
709a9643ea8Slogwang */
cxgbe_dev_stats_get(struct rte_eth_dev * eth_dev,struct rte_eth_stats * eth_stats)7102bfe3f2eSlogwang static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
711a9643ea8Slogwang struct rte_eth_stats *eth_stats)
712a9643ea8Slogwang {
7134b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
714a9643ea8Slogwang struct adapter *adapter = pi->adapter;
715a9643ea8Slogwang struct sge *s = &adapter->sge;
716a9643ea8Slogwang struct port_stats ps;
717a9643ea8Slogwang unsigned int i;
718a9643ea8Slogwang
719a9643ea8Slogwang cxgbe_stats_get(pi, &ps);
720a9643ea8Slogwang
721a9643ea8Slogwang /* RX Stats */
722a9643ea8Slogwang eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
723a9643ea8Slogwang ps.rx_ovflow2 + ps.rx_ovflow3 +
724a9643ea8Slogwang ps.rx_trunc0 + ps.rx_trunc1 +
725a9643ea8Slogwang ps.rx_trunc2 + ps.rx_trunc3;
726a9643ea8Slogwang eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
727a9643ea8Slogwang ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
728a9643ea8Slogwang ps.rx_len_err;
729a9643ea8Slogwang
730a9643ea8Slogwang /* TX Stats */
731a9643ea8Slogwang eth_stats->opackets = ps.tx_frames;
732a9643ea8Slogwang eth_stats->obytes = ps.tx_octets;
733a9643ea8Slogwang eth_stats->oerrors = ps.tx_error_frames;
734a9643ea8Slogwang
735a9643ea8Slogwang for (i = 0; i < pi->n_rx_qsets; i++) {
736a9643ea8Slogwang struct sge_eth_rxq *rxq =
737*2d9fd380Sjfb8856606 &s->ethrxq[pi->first_rxqset + i];
738a9643ea8Slogwang
739a9643ea8Slogwang eth_stats->q_ipackets[i] = rxq->stats.pkts;
740a9643ea8Slogwang eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
7412bfe3f2eSlogwang eth_stats->ipackets += eth_stats->q_ipackets[i];
7422bfe3f2eSlogwang eth_stats->ibytes += eth_stats->q_ibytes[i];
743a9643ea8Slogwang }
744a9643ea8Slogwang
745a9643ea8Slogwang for (i = 0; i < pi->n_tx_qsets; i++) {
746a9643ea8Slogwang struct sge_eth_txq *txq =
747*2d9fd380Sjfb8856606 &s->ethtxq[pi->first_txqset + i];
748a9643ea8Slogwang
749a9643ea8Slogwang eth_stats->q_opackets[i] = txq->stats.pkts;
750a9643ea8Slogwang eth_stats->q_obytes[i] = txq->stats.tx_bytes;
751a9643ea8Slogwang }
7522bfe3f2eSlogwang return 0;
753a9643ea8Slogwang }
754a9643ea8Slogwang
755a9643ea8Slogwang /*
756a9643ea8Slogwang * Reset port statistics.
757a9643ea8Slogwang */
cxgbe_dev_stats_reset(struct rte_eth_dev * eth_dev)7584418919fSjohnjiang static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
759a9643ea8Slogwang {
7604b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
761a9643ea8Slogwang struct adapter *adapter = pi->adapter;
762a9643ea8Slogwang struct sge *s = &adapter->sge;
763a9643ea8Slogwang unsigned int i;
764a9643ea8Slogwang
765a9643ea8Slogwang cxgbe_stats_reset(pi);
766a9643ea8Slogwang for (i = 0; i < pi->n_rx_qsets; i++) {
767a9643ea8Slogwang struct sge_eth_rxq *rxq =
768*2d9fd380Sjfb8856606 &s->ethrxq[pi->first_rxqset + i];
769a9643ea8Slogwang
770a9643ea8Slogwang rxq->stats.pkts = 0;
771a9643ea8Slogwang rxq->stats.rx_bytes = 0;
772a9643ea8Slogwang }
773a9643ea8Slogwang for (i = 0; i < pi->n_tx_qsets; i++) {
774a9643ea8Slogwang struct sge_eth_txq *txq =
775*2d9fd380Sjfb8856606 &s->ethtxq[pi->first_txqset + i];
776a9643ea8Slogwang
777a9643ea8Slogwang txq->stats.pkts = 0;
778a9643ea8Slogwang txq->stats.tx_bytes = 0;
779a9643ea8Slogwang txq->stats.mapping_err = 0;
780a9643ea8Slogwang }
7814418919fSjohnjiang
7824418919fSjohnjiang return 0;
783a9643ea8Slogwang }
784a9643ea8Slogwang
cxgbe_flow_ctrl_get(struct rte_eth_dev * eth_dev,struct rte_eth_fc_conf * fc_conf)785a9643ea8Slogwang static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
786a9643ea8Slogwang struct rte_eth_fc_conf *fc_conf)
787a9643ea8Slogwang {
7884b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
789a9643ea8Slogwang struct link_config *lc = &pi->link_cfg;
790a9643ea8Slogwang int rx_pause, tx_pause;
791a9643ea8Slogwang
792a9643ea8Slogwang fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
793a9643ea8Slogwang rx_pause = lc->fc & PAUSE_RX;
794a9643ea8Slogwang tx_pause = lc->fc & PAUSE_TX;
795a9643ea8Slogwang
796a9643ea8Slogwang if (rx_pause && tx_pause)
797a9643ea8Slogwang fc_conf->mode = RTE_FC_FULL;
798a9643ea8Slogwang else if (rx_pause)
799a9643ea8Slogwang fc_conf->mode = RTE_FC_RX_PAUSE;
800a9643ea8Slogwang else if (tx_pause)
801a9643ea8Slogwang fc_conf->mode = RTE_FC_TX_PAUSE;
802a9643ea8Slogwang else
803a9643ea8Slogwang fc_conf->mode = RTE_FC_NONE;
804a9643ea8Slogwang return 0;
805a9643ea8Slogwang }
806a9643ea8Slogwang
cxgbe_flow_ctrl_set(struct rte_eth_dev * eth_dev,struct rte_eth_fc_conf * fc_conf)807a9643ea8Slogwang static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
808a9643ea8Slogwang struct rte_eth_fc_conf *fc_conf)
809a9643ea8Slogwang {
8104b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
811a9643ea8Slogwang struct adapter *adapter = pi->adapter;
812a9643ea8Slogwang struct link_config *lc = &pi->link_cfg;
813a9643ea8Slogwang
814d30ea906Sjfb8856606 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
815a9643ea8Slogwang if (fc_conf->autoneg)
816a9643ea8Slogwang lc->requested_fc |= PAUSE_AUTONEG;
817a9643ea8Slogwang else
818a9643ea8Slogwang lc->requested_fc &= ~PAUSE_AUTONEG;
819a9643ea8Slogwang }
820a9643ea8Slogwang
821a9643ea8Slogwang if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
822a9643ea8Slogwang (fc_conf->mode & RTE_FC_RX_PAUSE))
823a9643ea8Slogwang lc->requested_fc |= PAUSE_RX;
824a9643ea8Slogwang else
825a9643ea8Slogwang lc->requested_fc &= ~PAUSE_RX;
826a9643ea8Slogwang
827a9643ea8Slogwang if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
828a9643ea8Slogwang (fc_conf->mode & RTE_FC_TX_PAUSE))
829a9643ea8Slogwang lc->requested_fc |= PAUSE_TX;
830a9643ea8Slogwang else
831a9643ea8Slogwang lc->requested_fc &= ~PAUSE_TX;
832a9643ea8Slogwang
833a9643ea8Slogwang return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
834a9643ea8Slogwang &pi->link_cfg);
835a9643ea8Slogwang }
836a9643ea8Slogwang
837d30ea906Sjfb8856606 const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev * eth_dev)838a9643ea8Slogwang cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
839a9643ea8Slogwang {
840a9643ea8Slogwang static const uint32_t ptypes[] = {
841a9643ea8Slogwang RTE_PTYPE_L3_IPV4,
842a9643ea8Slogwang RTE_PTYPE_L3_IPV6,
843a9643ea8Slogwang RTE_PTYPE_UNKNOWN
844a9643ea8Slogwang };
845a9643ea8Slogwang
846a9643ea8Slogwang if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
847a9643ea8Slogwang return ptypes;
848a9643ea8Slogwang return NULL;
849a9643ea8Slogwang }
850a9643ea8Slogwang
851d30ea906Sjfb8856606 /* Update RSS hash configuration
852d30ea906Sjfb8856606 */
cxgbe_dev_rss_hash_update(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)853d30ea906Sjfb8856606 static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
854d30ea906Sjfb8856606 struct rte_eth_rss_conf *rss_conf)
855d30ea906Sjfb8856606 {
8564b05018fSfengbojiang struct port_info *pi = dev->data->dev_private;
857d30ea906Sjfb8856606 struct adapter *adapter = pi->adapter;
858d30ea906Sjfb8856606 int err;
859d30ea906Sjfb8856606
860d30ea906Sjfb8856606 err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
861d30ea906Sjfb8856606 if (err)
862d30ea906Sjfb8856606 return err;
863d30ea906Sjfb8856606
864d30ea906Sjfb8856606 pi->rss_hf = rss_conf->rss_hf;
865d30ea906Sjfb8856606
866d30ea906Sjfb8856606 if (rss_conf->rss_key) {
867d30ea906Sjfb8856606 u32 key[10], mod_key[10];
868d30ea906Sjfb8856606 int i, j;
869d30ea906Sjfb8856606
870d30ea906Sjfb8856606 memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
871d30ea906Sjfb8856606
872d30ea906Sjfb8856606 for (i = 9, j = 0; i >= 0; i--, j++)
873d30ea906Sjfb8856606 mod_key[j] = cpu_to_be32(key[i]);
874d30ea906Sjfb8856606
875d30ea906Sjfb8856606 t4_write_rss_key(adapter, mod_key, -1);
876d30ea906Sjfb8856606 }
877d30ea906Sjfb8856606
878d30ea906Sjfb8856606 return 0;
879d30ea906Sjfb8856606 }
880d30ea906Sjfb8856606
/* Get RSS hash configuration
 *
 * Reads the VI's RSS mode flags from firmware and translates them into
 * the DPDK rss_hf bit set; optionally reads back the RSS key.
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);

	if (err)
		return err;

	/* A 4-tuple enable maps to TCP hashing; the UDP variants are only
	 * reported when the global UDP enable flag is set as well.
	 */
	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
	}

	/* 2-tuple enables map to plain IP hashing. */
	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		/* Undo the word reversal / endian conversion applied when
		 * the key was programmed (see cxgbe_dev_rss_hash_update).
		 */
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}
932d30ea906Sjfb8856606
cxgbe_dev_rss_reta_update(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)933*2d9fd380Sjfb8856606 static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
934*2d9fd380Sjfb8856606 struct rte_eth_rss_reta_entry64 *reta_conf,
935*2d9fd380Sjfb8856606 uint16_t reta_size)
936*2d9fd380Sjfb8856606 {
937*2d9fd380Sjfb8856606 struct port_info *pi = dev->data->dev_private;
938*2d9fd380Sjfb8856606 struct adapter *adapter = pi->adapter;
939*2d9fd380Sjfb8856606 u16 i, idx, shift, *rss;
940*2d9fd380Sjfb8856606 int ret;
941*2d9fd380Sjfb8856606
942*2d9fd380Sjfb8856606 if (!(adapter->flags & FULL_INIT_DONE))
943*2d9fd380Sjfb8856606 return -ENOMEM;
944*2d9fd380Sjfb8856606
945*2d9fd380Sjfb8856606 if (!reta_size || reta_size > pi->rss_size)
946*2d9fd380Sjfb8856606 return -EINVAL;
947*2d9fd380Sjfb8856606
948*2d9fd380Sjfb8856606 rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
949*2d9fd380Sjfb8856606 if (!rss)
950*2d9fd380Sjfb8856606 return -ENOMEM;
951*2d9fd380Sjfb8856606
952*2d9fd380Sjfb8856606 rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
953*2d9fd380Sjfb8856606 for (i = 0; i < reta_size; i++) {
954*2d9fd380Sjfb8856606 idx = i / RTE_RETA_GROUP_SIZE;
955*2d9fd380Sjfb8856606 shift = i % RTE_RETA_GROUP_SIZE;
956*2d9fd380Sjfb8856606 if (!(reta_conf[idx].mask & (1ULL << shift)))
957*2d9fd380Sjfb8856606 continue;
958*2d9fd380Sjfb8856606
959*2d9fd380Sjfb8856606 rss[i] = reta_conf[idx].reta[shift];
960*2d9fd380Sjfb8856606 }
961*2d9fd380Sjfb8856606
962*2d9fd380Sjfb8856606 ret = cxgbe_write_rss(pi, rss);
963*2d9fd380Sjfb8856606 if (!ret)
964*2d9fd380Sjfb8856606 rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
965*2d9fd380Sjfb8856606
966*2d9fd380Sjfb8856606 rte_free(rss);
967*2d9fd380Sjfb8856606 return ret;
968*2d9fd380Sjfb8856606 }
969*2d9fd380Sjfb8856606
cxgbe_dev_rss_reta_query(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)970*2d9fd380Sjfb8856606 static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
971*2d9fd380Sjfb8856606 struct rte_eth_rss_reta_entry64 *reta_conf,
972*2d9fd380Sjfb8856606 uint16_t reta_size)
973*2d9fd380Sjfb8856606 {
974*2d9fd380Sjfb8856606 struct port_info *pi = dev->data->dev_private;
975*2d9fd380Sjfb8856606 struct adapter *adapter = pi->adapter;
976*2d9fd380Sjfb8856606 u16 i, idx, shift;
977*2d9fd380Sjfb8856606
978*2d9fd380Sjfb8856606 if (!(adapter->flags & FULL_INIT_DONE))
979*2d9fd380Sjfb8856606 return -ENOMEM;
980*2d9fd380Sjfb8856606
981*2d9fd380Sjfb8856606 if (!reta_size || reta_size > pi->rss_size)
982*2d9fd380Sjfb8856606 return -EINVAL;
983*2d9fd380Sjfb8856606
984*2d9fd380Sjfb8856606 for (i = 0; i < reta_size; i++) {
985*2d9fd380Sjfb8856606 idx = i / RTE_RETA_GROUP_SIZE;
986*2d9fd380Sjfb8856606 shift = i % RTE_RETA_GROUP_SIZE;
987*2d9fd380Sjfb8856606 if (!(reta_conf[idx].mask & (1ULL << shift)))
988*2d9fd380Sjfb8856606 continue;
989*2d9fd380Sjfb8856606
990*2d9fd380Sjfb8856606 reta_conf[idx].reta[shift] = pi->rss[i];
991*2d9fd380Sjfb8856606 }
992*2d9fd380Sjfb8856606
993*2d9fd380Sjfb8856606 return 0;
994*2d9fd380Sjfb8856606 }
995*2d9fd380Sjfb8856606
/* Return the physical EEPROM size in bytes; identical for every port. */
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
1001a9643ea8Slogwang
1002a9643ea8Slogwang /**
1003a9643ea8Slogwang * eeprom_ptov - translate a physical EEPROM address to virtual
1004a9643ea8Slogwang * @phys_addr: the physical EEPROM address
1005a9643ea8Slogwang * @fn: the PCI function number
1006a9643ea8Slogwang * @sz: size of function-specific area
1007a9643ea8Slogwang *
1008a9643ea8Slogwang * Translate a physical EEPROM address to virtual. The first 1K is
1009a9643ea8Slogwang * accessed through virtual addresses starting at 31K, the rest is
1010a9643ea8Slogwang * accessed through virtual addresses starting at 0.
1011a9643ea8Slogwang *
1012a9643ea8Slogwang * The mapping is as follows:
1013a9643ea8Slogwang * [0..1K) -> [31K..32K)
1014a9643ea8Slogwang * [1K..1K+A) -> [31K-A..31K)
1015a9643ea8Slogwang * [1K+A..ES) -> [0..ES-A-1K)
1016a9643ea8Slogwang *
1017a9643ea8Slogwang * where A = @fn * @sz, and ES = EEPROM size.
1018a9643ea8Slogwang */
eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)1019a9643ea8Slogwang static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1020a9643ea8Slogwang {
1021a9643ea8Slogwang fn *= sz;
1022a9643ea8Slogwang if (phys_addr < 1024)
1023a9643ea8Slogwang return phys_addr + (31 << 10);
1024a9643ea8Slogwang if (phys_addr < 1024 + fn)
1025a9643ea8Slogwang return fn + phys_addr - 1024;
1026a9643ea8Slogwang if (phys_addr < EEPROMSIZE)
1027a9643ea8Slogwang return phys_addr - 1024 - fn;
1028a9643ea8Slogwang if (phys_addr < EEPROMVSIZE)
1029a9643ea8Slogwang return phys_addr - 1024;
1030a9643ea8Slogwang return -EINVAL;
1031a9643ea8Slogwang }
1032a9643ea8Slogwang
1033a9643ea8Slogwang /* The next two routines implement eeprom read/write from physical addresses.
1034a9643ea8Slogwang */
eeprom_rd_phys(struct adapter * adap,unsigned int phys_addr,u32 * v)1035a9643ea8Slogwang static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1036a9643ea8Slogwang {
1037a9643ea8Slogwang int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1038a9643ea8Slogwang
1039a9643ea8Slogwang if (vaddr >= 0)
1040a9643ea8Slogwang vaddr = t4_seeprom_read(adap, vaddr, v);
1041a9643ea8Slogwang return vaddr < 0 ? vaddr : 0;
1042a9643ea8Slogwang }
1043a9643ea8Slogwang
/* Write one 32-bit word at a physical EEPROM address (see eeprom_rd_phys). */
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr < 0)
		return vaddr;

	vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}
1052a9643ea8Slogwang
1053a9643ea8Slogwang #define EEPROM_MAGIC 0x38E2F10C
1054a9643ea8Slogwang
cxgbe_get_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * e)1055a9643ea8Slogwang static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1056a9643ea8Slogwang struct rte_dev_eeprom_info *e)
1057a9643ea8Slogwang {
10584b05018fSfengbojiang struct port_info *pi = dev->data->dev_private;
1059a9643ea8Slogwang struct adapter *adapter = pi->adapter;
1060a9643ea8Slogwang u32 i, err = 0;
1061a9643ea8Slogwang u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1062a9643ea8Slogwang
1063a9643ea8Slogwang if (!buf)
1064a9643ea8Slogwang return -ENOMEM;
1065a9643ea8Slogwang
1066a9643ea8Slogwang e->magic = EEPROM_MAGIC;
1067a9643ea8Slogwang for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1068a9643ea8Slogwang err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1069a9643ea8Slogwang
1070a9643ea8Slogwang if (!err)
1071a9643ea8Slogwang rte_memcpy(e->data, buf + e->offset, e->length);
1072a9643ea8Slogwang rte_free(buf);
1073a9643ea8Slogwang return err;
1074a9643ea8Slogwang }
1075a9643ea8Slogwang
/* Write the requested byte range to the EEPROM.
 *
 * Writes happen in whole 32-bit words, so an unaligned range is handled
 * with a read-modify-write through a bounce buffer. For PFs other than
 * PF0 the writable window is restricted to that PF's private area.
 * Returns 0 on success or a negative error code.
 */
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Round the range out to 4-byte boundaries. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		/* Non-PF0 functions may only touch their own slice of the
		 * function-specific area.
		 */
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		/* Pre-read the first word and (if distinct) the last word,
		 * then overlay the caller's bytes at the right offset.
		 */
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		/* Already aligned: write straight from the caller's buffer. */
		buf = eeprom->data;
	}

	/* Lift write protection for the duration of the update. */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-arm write protection only if all writes succeeded. */
	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	/* Free the bounce buffer only when one was allocated. */
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
1134a9643ea8Slogwang
cxgbe_get_regs_len(struct rte_eth_dev * eth_dev)1135a9643ea8Slogwang static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
1136a9643ea8Slogwang {
11374b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
1138a9643ea8Slogwang struct adapter *adapter = pi->adapter;
1139a9643ea8Slogwang
1140a9643ea8Slogwang return t4_get_regs_len(adapter) / sizeof(uint32_t);
1141a9643ea8Slogwang }
1142a9643ea8Slogwang
cxgbe_get_regs(struct rte_eth_dev * eth_dev,struct rte_dev_reg_info * regs)1143a9643ea8Slogwang static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
1144a9643ea8Slogwang struct rte_dev_reg_info *regs)
1145a9643ea8Slogwang {
11464b05018fSfengbojiang struct port_info *pi = eth_dev->data->dev_private;
1147a9643ea8Slogwang struct adapter *adapter = pi->adapter;
1148a9643ea8Slogwang
1149a9643ea8Slogwang regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
1150a9643ea8Slogwang (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
1151a9643ea8Slogwang (1 << 16);
1152a9643ea8Slogwang
1153a9643ea8Slogwang if (regs->data == NULL) {
1154a9643ea8Slogwang regs->length = cxgbe_get_regs_len(eth_dev);
1155a9643ea8Slogwang regs->width = sizeof(uint32_t);
1156a9643ea8Slogwang
1157a9643ea8Slogwang return 0;
1158a9643ea8Slogwang }
1159a9643ea8Slogwang
1160a9643ea8Slogwang t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
1161a9643ea8Slogwang
1162a9643ea8Slogwang return 0;
1163a9643ea8Slogwang }
1164a9643ea8Slogwang
/* Set the port's primary MAC address.
 *
 * Modifies the existing entry at index pi->xact_addr_filt — presumably
 * the MPS TCAM exact-match filter for this port's unicast address
 * (TODO confirm against cxgbe_mpstcam_modify) — and records the index
 * returned on success, which may differ from the old one.
 */
int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct port_info *pi = dev->data->dev_private;
	int ret;

	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return ret;
	}
	/* A non-negative return is the (possibly new) filter index. */
	pi->xact_addr_filt = ret;
	return 0;
}
1179d30ea906Sjfb8856606
/* ethdev callback table for cxgbe PF ports; installed on every port in
 * eth_cxgbe_dev_init(). Rx/Tx burst handlers are assigned separately.
 */
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.dev_set_link_up        = cxgbe_dev_set_link_up,
	.dev_set_link_down      = cxgbe_dev_set_link_down,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup         = cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup         = cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.filter_ctrl            = cxgbe_dev_filter_ctrl,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set		= cxgbe_mac_addr_set,
	.reta_update		= cxgbe_dev_rss_reta_update,
	.reta_query		= cxgbe_dev_rss_reta_query,
};
1218a9643ea8Slogwang
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	/* Install the ops table and the default Rx/Tx burst handlers. */
	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		/* Additional ports are named "<pci>_1".."<pci>_N" by the
		 * primary; attach to each that exists and mirror this
		 * port's ops and burst handlers onto it.
		 */
		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	/* Let the ethdev layer auto-fill per-queue basic xstats. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	/* BAR0 holds the device register window. */
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	/* Parse devargs before probing so they can influence bring-up. */
	cxgbe_process_devargs(adapter);

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}
1298a9643ea8Slogwang
eth_cxgbe_dev_uninit(struct rte_eth_dev * eth_dev)1299d30ea906Sjfb8856606 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1300d30ea906Sjfb8856606 {
1301*2d9fd380Sjfb8856606 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1302*2d9fd380Sjfb8856606 uint16_t port_id;
1303*2d9fd380Sjfb8856606 int err = 0;
1304d30ea906Sjfb8856606
1305d30ea906Sjfb8856606 /* Free up other ports and all resources */
1306*2d9fd380Sjfb8856606 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1307*2d9fd380Sjfb8856606 err |= rte_eth_dev_close(port_id);
1308*2d9fd380Sjfb8856606
1309*2d9fd380Sjfb8856606 return err == 0 ? 0 : -EIO;
1310d30ea906Sjfb8856606 }
1311d30ea906Sjfb8856606
/* PCI probe hook: allocate an ethdev carrying a port_info private area
 * and hand it to eth_cxgbe_dev_init().
 */
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}
1318a9643ea8Slogwang
eth_cxgbe_pci_remove(struct rte_pci_device * pci_dev)13192bfe3f2eSlogwang static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
13202bfe3f2eSlogwang {
1321d30ea906Sjfb8856606 return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
13222bfe3f2eSlogwang }
13232bfe3f2eSlogwang
/* PCI driver descriptor: matches the Chelsio device-ID table and asks
 * the bus to map BARs before probe.
 */
static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};
1330a9643ea8Slogwang
/* Register the PMD with the PCI bus, export its ID table and kernel
 * module dependencies, declare the accepted devargs, and create the
 * driver's two log types.
 */
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER(cxgbe_logtype, pmd.net.cxgbe, NOTICE);
RTE_LOG_REGISTER(cxgbe_mbox_logtype, pmd.net.cxgbe.mbox, NOTICE);
1341