/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

/* This file should not be included directly.  Include common.h instead. */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_rwlock.h>
#include <rte_ethdev.h>

#include "../cxgbe_compat.h"
#include "../cxgbe_ofld.h"
#include "t4_regs_values.h"

enum {
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
};

struct adapter;
struct sge_rspq;

enum {
	PORT_RSS_DONE = (1 << 0),
};

struct port_info {
	struct adapter *adapter;        /* adapter that this port belongs to */
	struct rte_eth_dev *eth_dev;    /* associated rte eth device */
	struct port_stats stats_base;   /* port statistics base */
	struct link_config link_cfg;    /* link configuration info */

	unsigned long flags;            /* port related flags */
	short int xact_addr_filt;       /* index of exact MAC address filter */

	u16    viid;                    /* associated virtual interface id */
	s8     mdio_addr;               /* address of the PHY */
	u8     port_type;               /* firmware port type */
	u8     mod_type;                /* firmware module type */
	u8     port_id;                 /* physical port ID */
	u8     pidx;			/* port index for this PF */
	u8     tx_chan;                 /* associated channel */

	u16    n_rx_qsets;              /* # of rx qsets */
	u16    n_tx_qsets;              /* # of tx qsets */
	u16    first_rxqset;            /* index of first rxqset */
	u16    first_txqset;            /* index of first txqset */

	u16    *rss;                    /* rss table */
	u8     rss_mode;                /* rss mode */
	u16    rss_size;                /* size of VI's RSS table slice */
	u64    rss_hf;			/* RSS Hash Function */

	/* viid fields either returned by the fw
	 * or decoded by the driver from the viid.
	 */
	u8 vin;
	u8 vivld;
};

/* Enable or disable autonegotiation.  When autonegotiation is enabled,
 * any forced link parameters are completely ignored.
 */
#define AUTONEG_DISABLE         0x00
#define AUTONEG_ENABLE          0x01

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	USING_MSI          = (1 << 1),
	USING_MSIX         = (1 << 2),
	FW_QUEUE_BOUND     = (1 << 3),
	FW_OK              = (1 << 4),
	CFG_QUEUES	   = (1 << 5),
	MASTER_PF          = (1 << 6),
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	void *buf;                 /* struct page or mbuf */
	dma_addr_t dma_addr;
};

struct sge_fl {                     /* SGE free-buffer queue state */
	/* RO fields */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */

	dma_addr_t addr;            /* bus address of HW ring start */
	__be64 *desc;               /* address of HW Rx descriptor ring */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;      /* SGE relative QID for the free list */
	unsigned int size;          /* capacity of free list */

	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */

	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long low;          /* # of times momentarily starving */
};

#define MAX_MBUF_FRAGS (16384 / 512 + 2)

/* A packet gather list */
struct pkt_gl {
	union {
		struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
	} /* UNNAMED */;
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
	bool usembufs;                    /* use mbufs for fragments */
};
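
/* Usage sketch (illustrative only; process_frag() is a hypothetical
 * consumer, not part of the driver): a response-queue handler handed a
 * gather list walks the fragments, assuming usembufs is set so that
 * each fragment is an rte_mbuf.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < gl->nfrags; i++)
 *		process_frag(gl->mbufs[i]);
 */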

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct adapter *adapter;      /* adapter that this queue belongs to */
	struct rte_eth_dev *eth_dev;  /* associated rte eth device */
	struct rte_mempool  *mb_pool; /* associated mempool */

	dma_addr_t phys_addr;       /* physical address of the ring */
	__be64 *desc;               /* address of HW response ring */
	const __be64 *cur_desc;     /* current descriptor in queue */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
	struct sge_qstat *stat;

	unsigned int cidx;          /* consumer index */
	unsigned int gts_idx;	    /* last gts write sent */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	int offset;                 /* offset into current Rx buffer */

	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 port_id;		    /* associated port-id */
	u8 idx;                     /* queue index within its group */
	u16 cntxt_id;               /* SGE relative QID for the response Q */
	u16 abs_id;                 /* absolute SGE id for the response q */

	rspq_handler_t handler;     /* associated handler for this response q */
};
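
/* Polling sketch (a simplification of the usual T4 scheme): each
 * response-ring entry carries a generation bit, and the entry at cidx
 * is valid only while that bit matches q->gen; q->gen flips each time
 * cidx wraps around the ring, so entries left over from the previous
 * lap are never mistaken for new responses.
 */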

struct sge_eth_rx_stats {	/* Ethernet rx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 rx_bytes;		/* # of ethernet bytes */
	u64 rx_cso;		/* # of Rx checksum offloads */
	u64 vlan_ex;		/* # of Rx VLAN extractions */
	u64 rx_drops;		/* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* a SW Ethernet Rx queue */
	unsigned int flags;         /* flags for state of the queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_rx_stats stats;
	bool usembufs;               /* one ingress packet per mbuf FL buffer */
} __rte_cache_aligned;

/*
 * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
 * packet (if one sgl is present) and type 1 needs 32 bytes. This means
 * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
 * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
 * to be able to free those mbufs when we get completions back from the FW.
 * Allocating the maximum number of pointers in every tx desc is a waste
 * of memory, so we only store 2 pointers per tx desc, which is enough
 * since a tx desc can fit at most 2 packets in the best case scenario,
 * where a packet needs 32 bytes.
 */
#define ETH_COALESCE_PKT_NUM 15
#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
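
/* Worked example of the arithmetic above (a sketch; the exact WR size
 * budget is enforced in the Tx path): with roughly 512 bytes of
 * coalesce WR space, type 1 gets 512 / 32 = 16 slots, one of which is
 * taken by the WR header, leaving ETH_COALESCE_PKT_NUM = 15 packets,
 * while type 0 gets about 512 / 48 = 10 packets per WR. A tx_desc is
 * 64 bytes (8 flits), so at 32 bytes per packet at most two packets
 * can begin in one descriptor, hence ETH_COALESCE_PKT_PER_DESC = 2.
 */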

struct tx_eth_coal_desc {
	struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
	struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
	int idx;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct rte_mbuf *mbuf;
	struct ulptx_sgl *sgl;
	struct tx_eth_coal_desc coalesce;
};

enum cxgbe_txq_state {
	EQ_STOPPED = (1 << 0),
};

enum cxgbe_rxq_state {
	IQ_STOPPED = (1 << 0),
};

struct eth_coalesce {
	unsigned char *ptr;
	unsigned char type;
	unsigned int idx;
	unsigned int len;
	unsigned int flits;
	unsigned int max;
	__u8 ethmacdst[ETHER_ADDR_LEN];
	__u8 ethmacsrc[ETHER_ADDR_LEN];
	__be16 ethtype;
	__be16 vlantci;
};

struct sge_txq {
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	struct eth_coalesce coalesce; /* coalesce info */

	uint64_t phys_addr;         /* physical address of the ring */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;     /* SGE relative QID for the Tx Q */
	unsigned int in_use;       /* # of in-use Tx descriptors */
	unsigned int size;         /* # of descriptors */
	unsigned int cidx;         /* SW consumer index */
	unsigned int pidx;         /* producer index */
	unsigned int dbidx;	   /* last idx when db ring was done */
	unsigned int equeidx;	   /* last sent credit request */
	unsigned int last_pidx;	   /* last pidx recorded by tx monitor */
	unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
	unsigned int abs_id;

	int db_disabled;            /* doorbell state */
	unsigned short db_pidx;     /* doorbell producer index */
	unsigned short db_pidx_inc; /* doorbell producer increment */
};
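
/* Occupancy sketch (illustrative; txq_avail() is hypothetical and
 * ignores any slack the real accounting may reserve): in_use counts
 * occupied descriptors, so the free space is just the difference,
 * with pidx and cidx wrapping back to 0 at size.
 *
 *	static inline unsigned int txq_avail(const struct sge_txq *q)
 *	{
 *		return q->size - q->in_use;
 *	}
 */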

struct sge_eth_tx_stats {	/* Ethernet tx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 tx_bytes;		/* # of ethernet bytes */
	u64 tso;		/* # of TSO requests */
	u64 tx_cso;		/* # of Tx checksum offloads */
	u64 vlan_ins;		/* # of Tx VLAN insertions */
	u64 mapping_err;	/* # of I/O MMU packet mapping errors */
	u64 coal_wr;            /* # of coalesced wr */
	u64 coal_pkts;          /* # of coalesced packets */
};

struct sge_eth_txq {                   /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct rte_eth_dev *eth_dev;   /* port that this queue belongs to */
	struct rte_eth_dev_data *data;
	struct sge_eth_tx_stats stats; /* queue statistics */
	rte_spinlock_t txq_lock;

	unsigned int flags;            /* flags for state of the queue */
} __rte_cache_aligned;

struct sge_ctrl_txq {                /* State for an SGE control Tx queue */
	struct sge_txq q;            /* txq */
	struct adapter *adapter;     /* adapter associated with this queue */
	rte_spinlock_t ctrlq_lock;   /* control queue lock */
	u8 full;                     /* the Tx ring is full */
	u64 txp;                     /* number of transmits */
	struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
} __rte_cache_aligned;

struct sge {
	struct sge_eth_txq *ethtxq;
	struct sge_eth_rxq *ethrxq;
	struct sge_rspq fw_evtq __rte_cache_aligned;
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */

	/* response queue interrupt parameters */
	u16 timer_val[SGE_NTIMERS];
	u8  counter_val[SGE_NCOUNTERS];

	u32 fl_align;               /* response queue message alignment */
	u32 fl_pg_order;            /* large page allocation size */
	u32 fl_starve_thres;        /* Free List starvation threshold */
};

#define T4_OS_NEEDS_MBOX_LOCKING 1

/*
 * OS Lock/List primitives for those interfaces in the Common Code which
 * need this.
 */

struct mbox_entry {
	TAILQ_ENTRY(mbox_entry) next;
};

TAILQ_HEAD(mbox_list, mbox_entry);

struct adapter_devargs {
	bool keep_ovlan;
	bool force_link_up;
	bool tx_mode_latency;
	u32 filtermode;
	u32 filtermask;
};

struct adapter {
	struct rte_pci_device *pdev;       /* associated rte pci device */
	struct rte_eth_dev *eth_dev;       /* first port's rte eth device */
	struct adapter_params params;      /* adapter parameters */
	struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
	struct sge sge;                    /* associated SGE */

	/* support for single-threading access to adapter mailbox registers */
	struct mbox_list mbox_list;
	rte_spinlock_t mbox_lock;

	u8 *regs;              /* pointer to registers region */
	u8 *bar2;              /* pointer to bar2 region */
	unsigned long flags;   /* adapter flags */
	unsigned int mbox;     /* associated mailbox */
	unsigned int pf;       /* associated physical function id */

	unsigned int vpd_busy;
	unsigned int vpd_flag;

	int use_unpacked_mode; /* unpacked rx mode state */
	rte_spinlock_t win0_lock;

	rte_spinlock_t flow_lock; /* Serialize access for rte_flow ops */

	unsigned int clipt_start; /* CLIP table start */
	unsigned int clipt_end;   /* CLIP table end */
	unsigned int l2t_start;   /* Layer 2 table start */
	unsigned int l2t_end;     /* Layer 2 table end */
	struct clip_tbl *clipt;   /* CLIP table */
	struct l2t_data *l2t;     /* Layer 2 table */
	struct smt_data *smt;     /* Source mac table */
	struct mpstcam_table *mpstcam;

	struct tid_info tids;     /* Info used to access TID related tables */

	struct adapter_devargs devargs;
};

/**
 * t4_os_rwlock_init - initialize rwlock
 * @lock: the rwlock
 */
static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
{
	rte_rwlock_init(lock);
}

/**
 * t4_os_write_lock - get a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_lock(rte_rwlock_t *lock)
{
	rte_rwlock_write_lock(lock);
}

/**
 * t4_os_write_unlock - unlock a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_unlock(rte_rwlock_t *lock)
{
	rte_rwlock_write_unlock(lock);
}

/**
 * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct port_info associated with a rte_eth_dev
 */
static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
{
	return dev->data->dev_private;
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
{
	return adap->port[idx];
}

/**
 * ethdev2adap - return the adapter structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct adapter associated with a rte_eth_dev
 */
static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->adapter;
}

#define CXGBE_PCI_REG(reg) rte_read32(reg)

static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
	uint64_t val = CXGBE_PCI_REG(addr);
	uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));

	val2 = (uint64_t)(val2 << 32);
	val += val2;
	return val;
}

static inline uint32_t cxgbe_read_addr(volatile void *addr)
{
	return CXGBE_PCI_REG(addr);
}

#define CXGBE_PCI_REG_ADDR(adap, reg) \
	((volatile uint32_t *)((char *)(adap)->regs + (reg)))

#define CXGBE_READ_REG(adap, reg) \
	cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_READ_REG64(adap, reg) \
	cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))

#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
	rte_write32_relaxed((value), (reg))

#define CXGBE_WRITE_REG(adap, reg, value) \
	CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
	CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
	CXGBE_PCI_REG_WRITE(addr, val);
	CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
	return val;
}

#define CXGBE_WRITE_REG64(adap, reg, value) \
	cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
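
/* Note that the 64-bit accessors above are built from two 32-bit MMIO
 * operations (low word at addr, high word at addr + 4), so they are
 * not atomic: a 64-bit counter that ticks over between the two halves
 * can be observed torn.
 */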

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	return CXGBE_READ_REG(adapter, reg_addr);
}

/**
 * t4_write_reg - write a HW register with barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	CXGBE_WRITE_REG(adapter, reg_addr, val);
}

/**
 * t4_write_reg_relaxed - write a HW register with no barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
					u32 val)
{
	CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
}

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	return CXGBE_READ_REG64(adapter, reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	CXGBE_WRITE_REG64(adapter, reg_addr, val);
}
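
/* Usage sketch for the accessors above (illustrative only; the
 * register offset A_SGE_CONTROL and the field mask F_SOME_FIELD are
 * assumed/hypothetical names of the kind t4_regs.h provides):
 *
 *	u32 v = t4_read_reg(adap, A_SGE_CONTROL);
 *	t4_write_reg(adap, A_SGE_CONTROL, v | F_SOME_FIELD);
 */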

#define PCI_STATUS              0x06    /* 16 bits */
#define PCI_STATUS_CAP_LIST     0x10    /* Support Capability List */
#define PCI_CAPABILITY_LIST     0x34    /* Offset of first capability list entry */
#define PCI_CAP_ID_EXP          0x10    /* PCI Express */
#define PCI_CAP_LIST_ID         0       /* Capability ID */
#define PCI_CAP_LIST_NEXT       1       /* Next capability in the list */
#define PCI_EXP_DEVCTL          0x0008  /* Device control */
#define PCI_EXP_DEVCTL2         40      /* Device Control 2 */
#define PCI_EXP_DEVCTL_EXT_TAG  0x0100  /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PAYLOAD  0x00E0  /* Max payload */
#define PCI_CAP_ID_VPD          0x03    /* Vital Product Data */
#define PCI_VPD_ADDR            2       /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F          0x8000  /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA            4       /* 32-bits of data returned here */

/**
 * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
					off_t val)
{
	u32 val32 = val;

	if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
				     addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 32-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
				       u32 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				    addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 16-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
					off_t val)
{
	u16 val16 = val;

	if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
				     addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 16-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
				       u16 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				    addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read an 8-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
				      u8 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				    addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_find_pci_capability - lookup a capability in the PCI capability list
 * @adapter: the adapter
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability list.
 */
static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
{
	u16 status;
	int ttl = 48;
	u8 pos = 0;
	u8 id = 0;

	t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST)) {
		dev_err(adapter, "PCIe capability reading failed\n");
		return -1;
	}

	t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);

		if (id == 0xff)
			break;

		if (id == cap)
			return (int)pos;

		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
	}
	return 0;
}
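
/* Example (uses only helpers and constants defined above): locate the
 * PCIe capability and read its Device Control register. A minimal
 * sketch; real callers also handle the not-found case reported via the
 * return value.
 *
 *	u16 devctl = 0;
 *	int pos = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
 *
 *	if (pos > 0)
 *		t4_os_pci_read_cfg2(adap, pos + PCI_EXP_DEVCTL, &devctl);
 */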

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
				     u8 hw_addr[])
{
	struct port_info *pi = adap2pinfo(adapter, port_idx);

	rte_ether_addr_copy((struct rte_ether_addr *)hw_addr,
			&pi->eth_dev->data->mac_addrs[0]);
}

/**
 * t4_os_lock_init - initialize spinlock
 * @lock: the spinlock
 */
static inline void t4_os_lock_init(rte_spinlock_t *lock)
{
	rte_spinlock_init(lock);
}

/**
 * t4_os_lock - spin until lock is acquired
 * @lock: the spinlock
 */
static inline void t4_os_lock(rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
}

/**
 * t4_os_unlock - unlock a spinlock
 * @lock: the spinlock
 */
static inline void t4_os_unlock(rte_spinlock_t *lock)
{
	rte_spinlock_unlock(lock);
}

/**
 * t4_os_trylock - try to get a lock
 * @lock: the spinlock
 */
static inline int t4_os_trylock(rte_spinlock_t *lock)
{
	return rte_spinlock_trylock(lock);
}

/**
 * t4_os_init_list_head - initialize a mailbox list head
 * @head: head of list to initialize [to empty]
 */
static inline void t4_os_init_list_head(struct mbox_list *head)
{
	TAILQ_INIT(head);
}

static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
{
	return TAILQ_FIRST(head);
}

/**
 * t4_os_atomic_add_tail - Enqueue list element atomically onto list
 * @entry: the entry to be added to the queue
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_INSERT_TAIL(head, entry, next);
	t4_os_unlock(lock);
}

/**
 * t4_os_atomic_list_del - Dequeue list element atomically from list
 * @entry: the entry to be removed/dequeued from the list
 * @head: current head of the linked list
 * @lock: the spinlock
 */
static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_REMOVE(head, entry, next);
	t4_os_unlock(lock);
}
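
/* Sketch of the FIFO-turn pattern these list primitives support (a
 * simplified rendering of how the mailbox path serializes access; the
 * real code interleaves delays and timeouts in the busy-wait):
 *
 *	struct mbox_entry entry;
 *
 *	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
 *	while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
 *		;                          (spin until first in line)
 *	... perform the mailbox transaction ...
 *	t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
 */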

/**
 * t4_init_completion - initialize completion
 * @c: the completion context
 */
static inline void t4_init_completion(struct t4_completion *c)
{
	c->done = 0;
	t4_os_lock_init(&c->lock);
}

/**
 * t4_complete - set completion as done
 * @c: the completion context
 */
static inline void t4_complete(struct t4_completion *c)
{
	t4_os_lock(&c->lock);
	c->done = 1;
	t4_os_unlock(&c->lock);
}
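
/* Usage sketch (simplified; real waiters also bound the poll with a
 * timeout): the initiator sets up the completion and polls done under
 * the lock, while the event path signals it with t4_complete().
 *
 *	struct t4_completion c;
 *
 *	t4_init_completion(&c);
 *	... kick off the asynchronous operation ...
 *	t4_os_lock(&c.lock);
 *	while (!c.done) {
 *		t4_os_unlock(&c.lock);
 *		rte_delay_ms(1);
 *		t4_os_lock(&c.lock);
 *	}
 *	t4_os_unlock(&c.lock);
 */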

/**
 * cxgbe_port_viid - get the VI id of a port
 * @dev: the device for the port
 *
 * Return the VI id of the given port.
 */
static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->viid;
}

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size)     t4_alloc_mem((_size))
#define t4_os_free(_ptr)       t4_free_mem((_ptr))

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
		uint16_t nb_pkts);
int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_sge_init(struct adapter *adap);
int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
			 unsigned int iqid, int socket_id);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct rte_eth_dev *eth_dev, uint16_t queue_id,
			  unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t handler,
		     int cong, struct rte_mempool *mp, int queue_id,
		     int socket_id);
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq);
int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
void t4_sge_eth_release_queues(struct port_info *pi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done);
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);

#endif /* __T4_ADAPTER_H__ */