/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

/* This file should not be included directly. Include common.h instead. */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_rwlock.h>
#include <ethdev_driver.h>

#include "../cxgbe_compat.h"
#include "../cxgbe_ofld.h"
#include "t4_regs_values.h"

enum {
	MAX_CTRL_QUEUES = NCHAN,	/* # of control Tx queues */
};

struct adapter;
struct sge_rspq;

enum {
	PORT_RSS_DONE = (1 << 0),
};

struct port_info {
	struct adapter *adapter;	/* adapter that this port belongs to */
	struct rte_eth_dev *eth_dev;	/* associated rte eth device */
	struct port_stats stats_base;	/* port statistics base */
	struct link_config link_cfg;	/* link configuration info */

	unsigned long flags;		/* port related flags */
	short int xact_addr_filt;	/* index of exact MAC address filter */

	u16 viid;			/* associated virtual interface id */
	u8 port_id;			/* physical port ID */
	u8 pidx;			/* port index for this PF */
	u8 tx_chan;			/* associated channel */

	u16 n_rx_qsets;			/* # of rx qsets */
	u16 n_tx_qsets;			/* # of tx qsets */
	u16 first_rxqset;		/* index of first rxqset */
	u16 first_txqset;		/* index of first txqset */

	u16 *rss;			/* rss table */
	u8 rss_mode;			/* rss mode */
	u16 rss_size;			/* size of VI's RSS table slice */
	u64 rss_hf;			/* RSS Hash Function */

	/* viid fields either returned by fw
	 * or decoded by parsing viid by driver.
	 */
	u8 vin;
	u8 vivld;

	u8 vi_en_rx;			/* Enable/disable VI Rx */
	u8 vi_en_tx;			/* Enable/disable VI Tx */
};

enum {					/* adapter flags */
	FULL_INIT_DONE = (1 << 0),
	USING_MSI = (1 << 1),
	USING_MSIX = (1 << 2),
	FW_QUEUE_BOUND = (1 << 3),
	FW_OK = (1 << 4),
	CFG_QUEUES = (1 << 5),
	MASTER_PF = (1 << 6),
};

struct rx_sw_desc {			/* SW state per Rx descriptor */
	void *buf;			/* struct page or mbuf */
	dma_addr_t dma_addr;
};

struct sge_fl {				/* SGE free-buffer queue state */
	/* RO fields */
	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */

	dma_addr_t addr;		/* bus address of HW ring start */
	__be64 *desc;			/* address of HW Rx descriptor ring */

	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;		/* SGE relative QID for the free list */
	unsigned int size;		/* capacity of free list */

	unsigned int avail;		/* # of available Rx buffers */
	unsigned int pend_cred;		/* new buffers since last FL DB ring */
	unsigned int cidx;		/* consumer index */
	unsigned int pidx;		/* producer index */

	unsigned long alloc_failed;	/* # of times buffer allocation failed */
	unsigned long low;		/* # of times momentarily starving */
	u8 fl_buf_size_idx;		/* Selected SGE_FL_BUFFER_SIZE index */
};

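/*
 * Worst-case number of Rx gather-list fragments: presumably sized for a
 * 16KB packet landing in the smallest (512-byte) free-list buffers, with
 * two extra entries of slack.
 */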
#define MAX_MBUF_FRAGS (16384 / 512 + 2)

/* A packet gather list */
struct pkt_gl {
	union {
		struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
	} /* UNNAMED */;
	void *va;			/* virtual address of first byte */
	unsigned int nfrags;		/* # of fragments */
	unsigned int tot_len;		/* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {			/* state for an SGE response queue */
	struct adapter *adapter;	/* adapter that this queue belongs to */
	struct rte_eth_dev *eth_dev;	/* associated rte eth device */
	struct rte_mempool *mb_pool;	/* associated mempool */

	dma_addr_t phys_addr;		/* physical address of the ring */
	__be64 *desc;			/* address of HW response ring */
	const __be64 *cur_desc;		/* current descriptor in queue */

	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
	struct sge_qstat *stat;

	unsigned int cidx;		/* consumer index */
	unsigned int gts_idx;		/* last gts write sent */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response queue */
	int offset;			/* offset into current Rx buffer */

	u8 gen;				/* current generation bit */
	u8 intr_params;			/* interrupt holdoff parameters */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 port_id;			/* associated port-id */
	u8 idx;				/* queue index within its group */
	u16 cntxt_id;			/* SGE relative QID for the response Q */
	u16 abs_id;			/* absolute SGE id for the response q */

	rspq_handler_t handler;		/* associated handler for this response q */
};

struct sge_eth_rx_stats {	/* Ethernet rx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 rx_bytes;		/* # of ethernet bytes */
	u64 rx_cso;		/* # of Rx checksum offloads */
	u64 vlan_ex;		/* # of Rx VLAN extractions */
	u64 rx_drops;		/* # of packets dropped due to no mem */
};

struct sge_eth_rxq {		/* a SW Ethernet Rx queue */
	unsigned int flags;	/* flags for state of the queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_rx_stats stats;
} __rte_cache_aligned;

/*
 * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
 * packet (if one sgl is present) and type 1 needs 32 bytes. This means
 * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
 * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
 * to be able to free those mbufs when we get completions back from the FW.
 * Allocating the maximum number of pointers in every tx desc is a waste
 * of memory resources, so we only store 2 pointers per tx desc, which
 * should be enough since a tx desc can fit at most 2 packets in the
 * best-case scenario, where a packet needs 32 bytes.
 */
#define ETH_COALESCE_PKT_NUM 15
#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
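
/*
 * Worked example of the sizing above, assuming the usual 512-byte maximum
 * WR size and a 32-byte WR header (an assumption; the exact header size
 * depends on the WR type):
 *	type 0: (512 - 32) / 48 = 10 packets per WR
 *	type 1: (512 - 32) / 32 = 15 packets per WR
 * which matches ETH_COALESCE_PKT_NUM above.
 */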

struct tx_eth_coal_desc {
	struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
	struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
	int idx;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct rte_mbuf *mbuf;
	struct ulptx_sgl *sgl;
	struct tx_eth_coal_desc coalesce;
};

enum cxgbe_txq_state {
	EQ_STOPPED = (1 << 0),
};

enum cxgbe_rxq_state {
	IQ_STOPPED = (1 << 0),
};

struct eth_coalesce {
	unsigned char *ptr;
	unsigned char type;
	unsigned int idx;
	unsigned int len;
	unsigned int flits;
	unsigned int max;
	__u8 ethmacdst[ETHER_ADDR_LEN];
	__u8 ethmacsrc[ETHER_ADDR_LEN];
	__be16 ethtype;
	__be16 vlantci;
};

struct sge_txq {
	struct tx_desc *desc;		/* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	struct eth_coalesce coalesce;	/* coalesce info */

	uint64_t phys_addr;		/* physical address of the ring */

	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;		/* SGE relative QID for the Tx Q */
	unsigned int in_use;		/* # of in-use Tx descriptors */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned int dbidx;		/* last idx when db ring was done */
	unsigned int equeidx;		/* last sent credit request */
	unsigned int last_pidx;		/* last pidx recorded by tx monitor */
	unsigned int last_coal_idx;	/* last coal-idx recorded by tx monitor */
	unsigned int abs_id;

	int db_disabled;		/* doorbell state */
	unsigned short db_pidx;		/* doorbell producer index */
	unsigned short db_pidx_inc;	/* doorbell producer increment */
};

struct sge_eth_tx_stats {	/* Ethernet tx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 tx_bytes;		/* # of ethernet bytes */
	u64 tso;		/* # of TSO requests */
	u64 tx_cso;		/* # of Tx checksum offloads */
	u64 vlan_ins;		/* # of Tx VLAN insertions */
	u64 mapping_err;	/* # of I/O MMU packet mapping errors */
	u64 coal_wr;		/* # of coalesced wr */
	u64 coal_pkts;		/* # of coalesced packets */
};

struct sge_eth_txq {		/* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct rte_eth_dev *eth_dev;	/* port that this queue belongs to */
	struct rte_eth_dev_data *data;
	struct sge_eth_tx_stats stats;	/* queue statistics */
	rte_spinlock_t txq_lock;

	unsigned int flags;		/* flags for state of the queue */
} __rte_cache_aligned;

struct sge_ctrl_txq {		/* State for an SGE control Tx queue */
	struct sge_txq q;		/* txq */
	struct adapter *adapter;	/* adapter associated with this queue */
	rte_spinlock_t ctrlq_lock;	/* control queue lock */
	u8 full;			/* the Tx ring is full */
	u64 txp;			/* number of transmits */
	struct rte_mempool *mb_pool;	/* mempool to generate ctrl pkts */
} __rte_cache_aligned;

struct sge {
	struct sge_eth_txq *ethtxq;
	struct sge_eth_rxq *ethrxq;
	struct sge_rspq fw_evtq __rte_cache_aligned;
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	u16 max_ethqsets;	/* # of available Ethernet queue sets */
	u32 stat_len;		/* length of status page at ring end */
	u32 pktshift;		/* padding between CPL & packet data */

	/* response queue interrupt parameters */
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];

	u32 fl_starve_thres;	/* Free List starvation threshold */
	u32 fl_buffer_size[SGE_FL_BUFFER_SIZE_NUM]; /* Free List buffer sizes */
};

/*
 * OS Lock/List primitives for those interfaces in the Common Code which
 * need this.
 */

struct mbox_entry {
	TAILQ_ENTRY(mbox_entry) next;
};

TAILQ_HEAD(mbox_list, mbox_entry);

struct adapter_devargs {
	bool keep_ovlan;
	bool force_link_up;
	bool tx_mode_latency;
	u32 filtermode;
	u32 filtermask;
};

struct adapter {
	struct rte_pci_device *pdev;	/* associated rte pci device */
	struct rte_eth_dev *eth_dev;	/* first port's rte eth device */
	struct adapter_params params;	/* adapter parameters */
	struct port_info *port[MAX_NPORTS];	/* ports belonging to this adapter */
	struct sge sge;			/* associated SGE */

	/* support for single-threading access to adapter mailbox registers */
	struct mbox_list mbox_list;
	rte_spinlock_t mbox_lock;

	u8 *regs;		/* pointer to registers region */
	u8 *bar2;		/* pointer to bar2 region */
	unsigned long flags;	/* adapter flags */
	unsigned int mbox;	/* associated mailbox */
	unsigned int pf;	/* associated physical function id */

	unsigned int vpd_busy;
	unsigned int vpd_flag;

	int use_unpacked_mode;	/* unpacked rx mode state */
	rte_spinlock_t win0_lock;

	rte_spinlock_t flow_lock;	/* Serialize access for rte_flow ops */

	unsigned int clipt_start;	/* CLIP table start */
	unsigned int clipt_end;		/* CLIP table end */
	unsigned int l2t_start;		/* Layer 2 table start */
	unsigned int l2t_end;		/* Layer 2 table end */
	struct clip_tbl *clipt;		/* CLIP table */
	struct l2t_data *l2t;		/* Layer 2 table */
	struct smt_data *smt;		/* Source mac table */
	struct mpstcam_table *mpstcam;

	struct tid_info tids;	/* Info used to access TID related tables */

	struct adapter_devargs devargs;
};

/**
 * t4_os_rwlock_init - initialize rwlock
 * @lock: the rwlock
 */
static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
{
	rte_rwlock_init(lock);
}

/**
 * t4_os_write_lock - get a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_lock(rte_rwlock_t *lock)
{
	rte_rwlock_write_lock(lock);
}

/**
 * t4_os_write_unlock - unlock a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_unlock(rte_rwlock_t *lock)
{
	rte_rwlock_write_unlock(lock);
}

/**
 * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct port_info associated with a rte_eth_dev
 */
static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
{
	return dev->data->dev_private;
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
{
	return adap->port[idx];
}

/**
 * ethdev2adap - return the adapter structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct adapter associated with a rte_eth_dev
 */
static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->adapter;
}

#define CXGBE_PCI_REG(reg) rte_read32(reg)

/*
 * Read a 64-bit register as two 32-bit reads (low word first). Note that
 * the combined read is not atomic.
 */
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
	uint64_t val = CXGBE_PCI_REG(addr);
	uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));

	return val | (val2 << 32);
}

static inline uint32_t cxgbe_read_addr(volatile void *addr)
{
	return CXGBE_PCI_REG(addr);
}

#define CXGBE_PCI_REG_ADDR(adap, reg) \
	((volatile uint32_t *)((char *)(adap)->regs + (reg)))

#define CXGBE_READ_REG(adap, reg) \
	cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_READ_REG64(adap, reg) \
	cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))

#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
	rte_write32_relaxed((value), (reg))

#define CXGBE_WRITE_REG(adap, reg, value) \
	CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
	CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

/*
 * Write a 64-bit register as two 32-bit writes (low word first); as with
 * cxgbe_read_addr64(), the combined access is not atomic.
 */
static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
	CXGBE_PCI_REG_WRITE(addr, val);
	CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
	return val;
}

#define CXGBE_WRITE_REG64(adap, reg, value) \
	cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	return CXGBE_READ_REG(adapter, reg_addr);
}

/**
 * t4_write_reg - write a HW register with barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	CXGBE_WRITE_REG(adapter, reg_addr, val);
}

/**
 * t4_write_reg_relaxed - write a HW register with no barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
					u32 val)
{
	CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
}

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	return CXGBE_READ_REG64(adapter, reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	CXGBE_WRITE_REG64(adapter, reg_addr, val);
}
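
/*
 * Example usage of the accessors above (illustrative only; assumes
 * A_PL_WHOAMI and G_SOURCEPF() from t4_regs.h are in scope, as they are
 * for the .c files that include this header via common.h):
 *
 *	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
 *	unsigned int pf = G_SOURCEPF(whoami);
 */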

#define PCI_STATUS		0x06	/* 16 bits */
#define PCI_STATUS_CAP_LIST	0x10	/* Support Capability List */
#define PCI_CAPABILITY_LIST	0x34
/* Offset of first capability list entry */
#define PCI_CAP_ID_EXP		0x10	/* PCI Express */
#define PCI_CAP_LIST_ID		0	/* Capability ID */
#define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
#define PCI_EXP_DEVCTL		0x0008	/* Device control */
#define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
#define PCI_EXP_DEVCTL_EXT_TAG	0x0100	/* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PAYLOAD	0x00E0	/* Max payload */
#define PCI_CAP_ID_VPD		0x03	/* Vital Product Data */
#define PCI_VPD_ADDR		2	/* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F		0x8000	/* Write 0, 1 indicates completion */
#define PCI_VPD_DATA		4	/* 32-bits of data returned here */

/**
 * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
					off_t val)
{
	u32 val32 = val;

	if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
				 addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 32-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
				       u32 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 16-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
					off_t val)
{
	u16 val16 = val;

	if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
				 addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 16-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
				       u16 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read an 8-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
				      u8 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_find_pci_capability - lookup a capability in the PCI capability list
 * @adapter: the adapter
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability
 * list, 0 if the capability is not found, or -1 on error.
 */
static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
{
	u16 status;
	int ttl = 48;
	u8 pos = 0;
	u8 id = 0;

	t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST)) {
		dev_err(adapter, "PCIe capability reading failed\n");
		return -1;
	}

	t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);

		if (id == 0xff)
			break;

		if (id == cap)
			return (int)pos;

		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
	}
	return 0;
}
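
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * combine t4_os_find_pci_capability() with the config-space helpers above
 * to read the Device Control register of the PCI Express capability.
 */
static inline int t4_example_read_pcie_devctl(struct adapter *adapter,
					      u16 *devctl)
{
	int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

	if (pos <= 0)
		return -1;	/* capability absent or config read error */

	t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, devctl);
	return 0;
}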

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW. Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
				     u8 hw_addr[])
{
	struct port_info *pi = adap2pinfo(adapter, port_idx);

	rte_ether_addr_copy((struct rte_ether_addr *)hw_addr,
			    &pi->eth_dev->data->mac_addrs[0]);
}

/**
 * t4_os_lock_init - initialize spinlock
 * @lock: the spinlock
 */
static inline void t4_os_lock_init(rte_spinlock_t *lock)
{
	rte_spinlock_init(lock);
}

/**
 * t4_os_lock - spin until lock is acquired
 * @lock: the spinlock
 */
static inline void t4_os_lock(rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
}

/**
 * t4_os_unlock - unlock a spinlock
 * @lock: the spinlock
 */
static inline void t4_os_unlock(rte_spinlock_t *lock)
{
	rte_spinlock_unlock(lock);
}

/**
 * t4_os_trylock - try to get a lock
 * @lock: the spinlock
 */
static inline int t4_os_trylock(rte_spinlock_t *lock)
{
	return rte_spinlock_trylock(lock);
}

/**
 * t4_os_init_list_head - initialize a mailbox list head to the empty list
 * @head: the list head to initialize
 */
static inline void t4_os_init_list_head(struct mbox_list *head)
{
	TAILQ_INIT(head);
}

/**
 * t4_os_list_first_entry - return the first entry of a mailbox list
 * @head: the list head
 */
static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
{
	return TAILQ_FIRST(head);
}

/**
 * t4_os_atomic_add_tail - enqueue a list element atomically onto a list
 * @entry: the entry to be added to the queue
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_INSERT_TAIL(head, entry, next);
	t4_os_unlock(lock);
}

/**
 * t4_os_atomic_list_del - dequeue a list element atomically from a list
 * @entry: the entry to be removed/dequeued from the list
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_REMOVE(head, entry, next);
	t4_os_unlock(lock);
}
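
/*
 * These helpers serialize access to the adapter mailbox; the common code
 * (t4_wr_mbox_meat_timeout() in t4_hw.c) uses them roughly as follows
 * (sketch; rte_delay_ms() from rte_cycles.h is assumed):
 *
 *	struct mbox_entry entry;
 *
 *	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
 *	while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
 *		rte_delay_ms(1);	// wait until we reach the head
 *	// ... access the mailbox registers ...
 *	t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
 */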

/**
 * t4_init_completion - initialize completion
 * @c: the completion context
 */
static inline void t4_init_completion(struct t4_completion *c)
{
	c->done = 0;
	t4_os_lock_init(&c->lock);
}

/**
 * t4_complete - set completion as done
 * @c: the completion context
 */
static inline void t4_complete(struct t4_completion *c)
{
	t4_os_lock(&c->lock);
	c->done = 1;
	t4_os_unlock(&c->lock);
}
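
/*
 * A waiter polls @done under @lock until the producer calls t4_complete().
 * A minimal sketch (assuming rte_delay_ms() and a caller-chosen timeout):
 *
 *	unsigned int done = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < timeout_ms && !done; i++) {
 *		t4_os_lock(&c->lock);
 *		done = c->done;
 *		t4_os_unlock(&c->lock);
 *		if (!done)
 *			rte_delay_ms(1);
 *	}
 */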

/**
 * cxgbe_port_viid - get the VI id of a port
 * @dev: the device for the port
 *
 * Return the VI id of the given port.
 */
static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->viid;
}

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size) t4_alloc_mem((_size))
#define t4_os_free(_ptr) t4_free_mem((_ptr))

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id);

void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
		uint16_t nb_pkts);
int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_sge_init(struct adapter *adap);
int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
			 unsigned int iqid, int socket_id);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct rte_eth_dev *eth_dev, uint16_t queue_id,
			  unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t handler,
		     int cong, struct rte_mempool *mp, int queue_id,
		     int socket_id);
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq);
int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
void t4_sge_eth_release_queues(struct port_info *pi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done);
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);

#endif /* __T4_ADAPTER_H__ */