/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <vm/uma.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "t4_ioctl.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"

#define KTR_CXGBE	KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x) __builtin_prefetch(x)
#endif

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD	SYSCTL_ADD_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#define CTLTYPE_U64		CTLTYPE_QUAD
#endif

SYSCTL_DECL(_hw_cxgbe);

struct adapter;
typedef struct adapter adapter_t;

enum {
	/*
	 * All ingress queues use this entry size.  Note that the firmware
	 * event queue and any iq expecting CPL_RX_PKT in the descriptor
	 * need this to be at least 64.
	 */
	IQ_ESIZE = 64,

	/* Default queue sizes for all kinds of ingress queues */
	FW_IQ_QSIZE = 256,
	RX_IQ_QSIZE = 1024,

	/* All egress queues use this entry size */
	EQ_ESIZE = 64,

	/* Default queue sizes for all kinds of egress queues */
	CTRL_EQ_QSIZE = 1024,
	TX_EQ_QSIZE = 1024,

#if MJUMPAGESIZE != MCLBYTES
	SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	SW_ZONE_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif
	CL_METADATA_SIZE = CACHE_LINE_SIZE,

	SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
	TX_SGL_SEGS = 39,
	TX_SGL_SEGS_TSO = 38,
	TX_SGL_SEGS_EO_TSO = 30,	/* XXX: lower for IPv6. */
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX = (1 << 0),
	INTR_MSI = (1 << 1),
	INTR_MSIX = (1 << 2)
};

enum {
	XGMAC_MTU = (1 << 0),
	XGMAC_PROMISC = (1 << 1),
	XGMAC_ALLMULTI = (1 << 2),
	XGMAC_VLANEX = (1 << 3),
	XGMAC_UCADDR = (1 << 4),
	XGMAC_MCADDRS = (1 << 5),

	XGMAC_ALL = 0xffff
};

enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK = (1 << 0),
	SLEEP_OK = (1 << 1),
	INTR_OK = (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD = HOLD_LOCK,
};
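
/*
 * Illustrative usage (a sketch, not part of the original header): callers
 * typically bracket configuration work with begin/end_synchronized_op,
 * declared near the end of this file.  "t4exmp" is a hypothetical
 * wait-channel string.
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4exmp");
 *	if (rc)
 *		return (rc);
 *	... operate on the adapter/VI ...
 *	end_synchronized_op(sc, 0);
 *
 * A caller that passed HOLD_LOCK would finish with
 * end_synchronized_op(sc, LOCK_HELD) instead.
 */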

enum {
	/* adapter flags */
	FULL_INIT_DONE = (1 << 0),
	FW_OK = (1 << 1),
	CHK_MBOX_ACCESS = (1 << 2),
	MASTER_PF = (1 << 3),
	ADAP_SYSCTL_CTX = (1 << 4),
	ADAP_ERR = (1 << 5),
	BUF_PACKING_OK = (1 << 6),
	IS_VF = (1 << 7),

	CXGBE_BUSY = (1 << 9),

	/* port flags */
	HAS_TRACEQ = (1 << 3),
	FIXED_IFMEDIA = (1 << 4),	/* ifmedia list doesn't change. */

	/* VI flags */
	DOOMED = (1 << 0),
	VI_INIT_DONE = (1 << 1),
	VI_SYSCTL_CTX = (1 << 2),

	/* adapter debug_flags */
	DF_DUMP_MBOX = (1 << 0),	/* Log all mbox cmd/rpl. */
	DF_LOAD_FW_ANYTIME = (1 << 1),	/* Allow LOAD_FW after init */
	DF_DISABLE_TCB_CACHE = (1 << 2),	/* Disable TCB cache (T6+) */
	DF_DISABLE_CFG_RETRY = (1 << 3),	/* Disable fallback config */
	DF_VERBOSE_SLOWINTR = (1 << 4),	/* Chatty slow intr handler */
};

#define IS_DOOMED(vi)	((vi)->flags & DOOMED)
#define SET_DOOMED(vi)	do {(vi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct vi_info {
	device_t dev;
	struct port_info *pi;

	struct ifnet *ifp;

	unsigned long flags;
	int if_flags;

	uint16_t *rss, *nm_rss;
	uint16_t viid;		/* opaque VI identifier */
	uint16_t smt_idx;
	uint16_t vin;
	uint8_t vfvld;
	int16_t xact_addr_filt;	/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint16_t rss_base;	/* start of VI's RSS table slice */
	int hashen;

	int nintr;
	int first_intr;

	/* These need to be int as they are used in sysctl */
	int ntxq;		/* # of tx queues */
	int first_txq;		/* index of first tx queue */
	int rsrv_noflowq;	/* Reserve queue 0 for non-flowid packets */
	int nrxq;		/* # of rx queues */
	int first_rxq;		/* index of first rx queue */
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
	int nnmtxq;
	int first_nm_txq;
	int nnmrxq;
	int first_nm_rxq;
	int tmr_idx;
	int ofld_tmr_idx;
	int pktc_idx;
	int ofld_pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	struct timeval last_refreshed;
	struct fw_vi_stats_vf stats;

	struct callout tick;
	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

struct tx_ch_rl_params {
	enum fw_sched_params_rate ratemode;	/* %port (REL) or kbps (ABS) */
	uint32_t maxrate;
};

enum {
	CLRL_USER = (1 << 0),	/* allocated manually. */
	CLRL_SYNC = (1 << 1),	/* sync hw update in progress. */
	CLRL_ASYNC = (1 << 2),	/* async hw update requested. */
	CLRL_ERR = (1 << 3),	/* last hw setup ended in error. */
};

struct tx_cl_rl_params {
	int refcount;
	uint8_t flags;
	enum fw_sched_params_rate ratemode;	/* %port REL or ABS value */
	enum fw_sched_params_unit rateunit;	/* kbps or pps (when ABS) */
	enum fw_sched_params_mode mode;		/* aggr or per-flow */
	uint32_t maxrate;
	uint16_t pktsize;
	uint16_t burstsize;
};

/* Tx scheduler parameters for a channel/port */
struct tx_sched_params {
	/* Channel Rate Limiter */
	struct tx_ch_rl_params ch_rl;

	/* Class WRR */
	/* XXX */

	/* Class Rate Limiter (including the default pktsize and burstsize). */
	int pktsize;
	int burstsize;
	struct tx_cl_rl_params cl_rl[];
};

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct vi_info *vi;
	int nvi;
	int up_vis;
	int uld_vis;

	struct tx_sched_params *sched_params;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;

	uint8_t lport;		/* associated offload logical port */
	int8_t mdio_addr;
	uint8_t port_type;
	uint8_t mod_type;
	uint8_t port_id;
	uint8_t tx_chan;
	uint8_t mps_bg_map;	/* rx MPS buffer group bitmap */
	uint8_t rx_e_chan_map;	/* rx TP e-channel bitmap */

	struct link_config link_cfg;
	struct ifmedia media;

	struct timeval last_refreshed;
	struct port_stats stats;
	u_int tnl_cong_drops;
	u_int tx_parse_error;
	u_long tx_tls_records;
	u_long tx_tls_octets;
	u_long rx_tls_records;
	u_long rx_tls_octets;

	struct callout tick;
};

#define IS_MAIN_VI(vi)	((vi) == &((vi)->pi->vi[0]))

/* Where the cluster came from, how it has been carved up. */
struct cluster_layout {
	int8_t zidx;
	int8_t hwidx;
	uint16_t region1;	/* mbufs laid out within this region */
				/* region2 is the DMA region */
	uint16_t region3;	/* cluster_metadata within this region */
};

struct cluster_metadata {
	u_int refcount;
	struct fl_sdesc *sd;	/* For debug only.  Could easily be stale */
};

struct fl_sdesc {
	caddr_t cl;
	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
	struct cluster_layout cll;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sdesc {
	struct mbuf *m;		/* m_nextpkt linked chain of frames */
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
};

#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
struct iq_desc {
	struct rss_header rss;
	uint8_t cpl[IQ_PAD];
	struct rsp_ctrl rsp;
};
#undef IQ_PAD
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);

enum {
	/* iq flags */
	IQ_ALLOCATED = (1 << 0),	/* firmware resources allocated */
	IQ_HAS_FL = (1 << 1),		/* iq associated with a freelist */
	IQ_RX_TIMESTAMP = (1 << 2),	/* provide the SGE rx timestamp */
	IQ_LRO_ENABLED = (1 << 3),	/* iq is an eth rxq with LRO enabled */
	IQ_ADJ_CREDIT = (1 << 4),	/* hw is off by 1 credit for this iq */

	/* iq state */
	IQS_DISABLED = 0,
	IQS_BUSY = 1,
	IQS_IDLE = 2,

	/* netmap related flags */
	NM_OFF = 0,
	NM_ON = 1,
	NM_BUSY = 2,
};

enum {
	CPL_COOKIE_RESERVED = 0,
	CPL_COOKIE_FILTER,
	CPL_COOKIE_DDP0,
	CPL_COOKIE_DDP1,
	CPL_COOKIE_TOM,
	CPL_COOKIE_HASHFILTER,
	CPL_COOKIE_ETHOFLD,
	CPL_COOKIE_AVAILABLE3,

	NUM_CPL_COOKIES = 8	/* Limited by M_COOKIE.  Do not increase. */
};

struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
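
/*
 * Sketch (assumed usage, based on the registration functions declared later
 * in this header): a module that owns one of the cookies above can claim a
 * shared CPL opcode for its own replies, e.g.
 *
 *	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, my_handler,
 *	    CPL_COOKIE_FILTER);
 *
 * where my_handler is a cpl_handler_t supplied by that module.
 */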

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	uint32_t flags;
	volatile int state;
	struct adapter *adapter;
	struct iq_desc *desc;	/* KVA of descriptor ring */
	int8_t intr_pktc_idx;	/* packet count threshold index */
	uint8_t gen;		/* generation bit */
	uint8_t intr_params;	/* interrupt holdoff parameters */
	uint8_t intr_next;	/* XXX: holdoff for next interrupt */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */
	uint16_t abs_id;	/* absolute SGE id for the iq */

	STAILQ_ENTRY(sge_iq) link;

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
};

enum {
	EQ_CTRL = 1,
	EQ_ETH = 2,
	EQ_OFLD = 3,

	/* eq flags */
	EQ_TYPEMASK = 0x3,		/* 2 lsbits hold the type (see above) */
	EQ_ALLOCATED = (1 << 2),	/* firmware resources allocated */
	EQ_ENABLED = (1 << 3),		/* open for business */
	EQ_QFLUSH = (1 << 4),		/* if_qflush in progress */
};

/* Listed in order of preference.  Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	unsigned int abs_id;	/* absolute SGE id for the eq */
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	uint8_t doorbells;
	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
	u_int udb_qid;		/* relative qid within the doorbell page */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	volatile u_int equiq;	/* EQUIQ outstanding */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	char lockname[16];
};

struct sw_zone_info {
	uma_zone_t zone;	/* zone that this cluster comes from */
	int size;		/* size of cluster: 2K, 4K, 9K, 16K, etc. */
	int type;		/* EXT_xxx type of the cluster */
	int8_t head_hwidx;
	int8_t tail_hwidx;
};

struct hw_buf_info {
	int8_t zidx;		/* backpointer to zone; -ve means unused */
	int8_t next;		/* next hwidx for this zone; -1 means no more */
	int size;
};

enum {
	NUM_MEMWIN = 3,

	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE = 0x1b800,

	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE = 0x28000,

	MEMWIN2_APERTURE_T4 = 65536,
	MEMWIN2_BASE_T4 = 0x30000,

	MEMWIN2_APERTURE_T5 = 128 * 1024,
	MEMWIN2_BASE_T5 = 0x60000,
};

struct memwin {
	struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
	uint32_t mw_base;	/* constant after setup_memwin */
	uint32_t mw_aperture;	/* ditto */
	uint32_t mw_curpos;	/* protected by mw_lock */
};

enum {
	FL_STARVING = (1 << 0),	/* on the adapter's list of starving fl's */
	FL_DOOMED = (1 << 1),	/* about to be destroyed */
	FL_BUF_PACKING = (1 << 2), /* buffer packing enabled */
	FL_BUF_RESUME = (1 << 3), /* resume from the middle of the frame */
};

#define FL_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
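
/*
 * Explanatory note (not in the original): dbidx and sidx count 64B hardware
 * descriptors while cidx counts individual buffers.  Each hardware
 * descriptor holds 8 buffer addresses (EQ_ESIZE / sizeof(__be64)), which is
 * presumably why both are scaled by 8 before being compared with cidx.
 */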

struct sge_fl {
	struct mtx fl_lock;
	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct cluster_layout cll_def;	/* default refill zone, layout */
	uint16_t lowat;		/* # of buffers <= this means fl needs help */
	int flags;
	uint16_t buf_boundary;

	/* The 16b idx all deal with hw descriptors */
	uint16_t dbidx;		/* hw pidx after last doorbell */
	uint16_t sidx;		/* index of status page */
	volatile uint16_t hw_cidx;

	/* The 32b idx are all buffer idx, not hardware descriptor idx */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */

	uint32_t dbval;
	u_int rx_offset;	/* offset in fl buf (when buffer packing) */
	volatile uint32_t *udb;

	uint64_t mbuf_allocated; /* # of mbuf allocated from zone_mbuf */
	uint64_t mbuf_inlined;	/* # of mbuf created within clusters */
	uint64_t cl_allocated;	/* # of clusters allocated */
	uint64_t cl_recycled;	/* # of clusters recycled */
	uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */

	/* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
	struct mbuf *m0;
	struct mbuf **pnext;
	u_int remaining;

	uint16_t qsize;		/* # of hw descriptors (status page included) */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct cluster_layout cll_alt;	/* alternate refill zone, layout */
};

struct mp_ring;

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	struct mp_ring *r;	/* tx software ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct sglist *gl;
	__be32 cpl_ctrl0;	/* for convenience */
	int tc_idx;		/* traffic class */

	struct task tx_reclaim_task;
	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion; /* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts0_wrs;	/* # of type0 coalesced tx work requests */
	uint64_t txpkts1_wrs;	/* # of type1 coalesced tx work requests */
	uint64_t txpkts0_pkts;	/* # of frames in type0 coalesced tx WRs */
	uint64_t txpkts1_pkts;	/* # of frames in type1 coalesced tx WRs */
	uint64_t raw_wrs;	/* # of raw work requests (alloc_wr_mbuf) */

	/* stats for not-that-common events */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
	struct lro_ctrl lro;	/* LRO state */
#endif

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction; /* # of times VLAN tag was extracted */

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}

/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	char wr[] __aligned(16);
};

struct wrq_cookie {
	TAILQ_ENTRY(wrq_cookie) link;
	int ndesc;
	int pidx;
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the
 * control and offload tx queues are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;
	struct task wrq_tx_task;

	/* Tx desc reserved but WR not "committed" yet. */
	TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;

	/* List of WRs ready to go out as soon as descriptors are available. */
	STAILQ_HEAD(, wrqe) wr_list;
	u_int nwr_pending;
	u_int ndesc_needed;

	/* stats for common events first */

	uint64_t tx_wrs_direct;	/* # of WRs written directly to desc ring. */
	uint64_t tx_wrs_ss;	/* # of WRs copied from scratch space. */
	uint64_t tx_wrs_copied;	/* # of WRs queued and copied to desc ring. */

	/* stats for not-that-common events */

	/*
	 * Scratch space for work requests that wrap around after reaching the
	 * status page, and some information about the last WR that used it.
	 */
	uint16_t ss_pidx;
	uint16_t ss_len;
	uint8_t ss[SGE_MAX_WR_LEN];

} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
	volatile int nm_state;	/* NM_OFF, NM_ON, or NM_BUSY */
	struct vi_info *vi;

	struct iq_desc *iq_desc;
	uint16_t iq_abs_id;
	uint16_t iq_cntxt_id;
	uint16_t iq_cidx;
	uint16_t iq_sidx;
	uint8_t iq_gen;

	__be64 *fl_desc;
	uint16_t fl_cntxt_id;
	uint32_t fl_cidx;
	uint32_t fl_pidx;
	uint32_t fl_sidx;
	uint32_t fl_db_val;
	u_int fl_hwidx:4;

	u_int fl_db_saved;
	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t iq_desc_tag;
	bus_dmamap_t iq_desc_map;
	bus_addr_t iq_ba;
	int intr_idx;

	bus_dma_tag_t fl_desc_tag;
	bus_dmamap_t fl_desc_map;
	bus_addr_t fl_ba;
} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_TXQ_CNTXT_ID ((u_int)(-1))
struct sge_nm_txq {
	struct tx_desc *desc;
	uint16_t cidx;
	uint16_t pidx;
	uint16_t sidx;
	uint16_t equiqidx;	/* EQUIQ last requested at this pidx */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint8_t doorbells;
	volatile uint32_t *udb;
	u_int udb_qid;
	u_int cntxt_id;
	__be32 cpl_ctrl0;	/* for convenience */
	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;
	int iqidx;
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
	int nnmrxq;	/* total # of netmap rx queues */
	int nnmtxq;	/* total # of netmap tx queues */
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
	struct sge_wrq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
	struct sge_nm_txq *nm_txq;	/* netmap tx queues */
	struct sge_nm_rxq *nm_rxq;	/* netmap rx queues */

	uint16_t iq_start;	/* first cntxt_id */
	uint16_t iq_base;	/* first abs_id */
	int eq_start;		/* first cntxt_id */
	int eq_base;		/* first abs_id */
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */

	int8_t safe_hwidx1;	/* may not have room for metadata */
	int8_t safe_hwidx2;	/* with room for metadata and maybe more */
	struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
	struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
};

struct devnames {
	const char *nexus_name;
	const char *ifnet_name;
	const char *vi_ifnet_name;
	const char *pf03_drv_name;
	const char *vf_nexus_name;
	const char *vf_ifnet_name;
};

struct clip_entry;

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;
	const struct devnames *names;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	int udbs_rid;
	struct resource *udbs_res;
	volatile uint8_t *udbs_base;

	unsigned int pf;
	unsigned int mbox;
	unsigned int vpd_busy;
	unsigned int vpd_flag;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
		struct sge_rxq *rxq;
		struct sge_nm_rxq *nm_rxq;
	} __aligned(CACHE_LINE_SIZE) *irq;
	int sge_gts_reg;
	int sge_kdoorbell_reg;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;
	int lro_timeout;
	int sc_do_rxcopy;

	struct taskqueue *tq[MAX_NCHAN];	/* General purpose taskqueues */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[MAX_NCHAN];		/* channel -> port */

	struct mtx clip_table_lock;
	TAILQ_HEAD(, clip_entry) clip_table;
	int clip_gen;

	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
	struct t4_offload_policy *policy;
	struct rwlock policy_lock;

	void *iwarp_softc;	/* (struct c4iw_dev *) */
	struct iw_tunables iwt;
	void *iscsi_ulp_softc;	/* (struct cxgbei_data *) */
	void *ccr_softc;	/* (struct ccr_softc *) */
	struct l2t_data *l2t;	/* L2 table */
	struct smt_data *smt;	/* Source MAC Table */
	struct tid_info tids;
	vmem_t *key_map;

	uint8_t doorbells;
	int offload_map;	/* ports with IFCAP_TOE enabled */
	int active_ulds;	/* ULDs activated on this adapter */
	int flags;
	int debug_flags;

	char ifp_lockname[16];
	struct mtx ifp_lock;
	struct ifnet *ifp;	/* tracer ifp */
	struct ifmedia media;
	int traceq;		/* iq used by all tracers, -1 if none */
	int tracer_valid;	/* bitmap of valid tracers */
	int tracer_enabled;	/* bitmap of enabled tracers */

	char fw_version[16];
	char tp_version[16];
	char er_version[16];
	char bs_version[16];
	char cfg_file[32];
	u_int cfcsum;
	struct adapter_params params;
	const struct chip_params *chip_params;
	struct t4_virt_res vres;

	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	struct mtx reg_lock;	/* for indirect register access */

	struct memwin memwin[NUM_MEMWIN];	/* memory windows */

	struct mtx tc_lock;
	struct task tc_task;

	const char *last_op;
	const void *last_op_thr;
	int last_op_flags;

	int swintr;
};

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define ASSERT_SYNCHRONIZED_OP(sc) \
    KASSERT(IS_BUSY(sc) && \
	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \
	    iter < vi->ntxq; ++iter, ++q)
#define for_each_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
	    iter < vi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
	    iter < vi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
	    iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
	    iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
	    iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
	for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
	    ++(_iter), ++(_vi))
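
/*
 * Illustrative iteration (a sketch): walking every NIC tx queue of a VI.
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(vi, i, txq) {
 *		... txq points at sge.txq[vi->first_txq + i] ...
 *	}
 */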

#define IDXINCR(idx, incr, wrap) do { \
	idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
} while (0)
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
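
/*
 * Example (explanatory): these implement modular ring arithmetic.  With a
 * ring of "wrap" entries, IDXDIFF(head, tail, wrap) is the occupancy, e.g.
 * IDXDIFF(eq->pidx, eq->cidx, eq->sidx) is the number of tx descriptors in
 * use, and IDXINCR advances an index with wraparound.
 */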

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

/* One for firmware events */
#define T4VF_EXTRA_INTR 1

static inline int
forwarding_intr_to_fwq(struct adapter *sc)
{

	return (sc->intr_count == 1);
}

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

#ifdef __LP64__
	return bus_space_read_8(sc->bt, sc->bh, reg);
#else
	return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) +
	    ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32);
#endif
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

#ifdef __LP64__
	bus_space_write_8(sc->bt, sc->bh, reg, val);
#else
	bus_space_write_4(sc->bt, sc->bh, reg, val);
	bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32);
#endif
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct port_info *pi, uint8_t hw_addr[])
{

	bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

	/* not quite the same as qsize / 4, but this will do. */
	return (eq->sidx / 4);
}

static inline int
t4_use_ldst(struct adapter *sc)
{

#ifdef notyet
	return (sc->flags & FW_OK || !sc->use_bd);
#else
	return (0);
#endif
}

static inline void
CH_DUMP_MBOX(struct adapter *sc, int mbox, const int reg,
    const char *msg, const __be64 *const p, const bool err)
{

	if (!(sc->debug_flags & DF_DUMP_MBOX) && !err)
		return;
	if (p != NULL) {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)be64_to_cpu(p[0]), (long long)be64_to_cpu(p[1]),
		    (long long)be64_to_cpu(p[2]), (long long)be64_to_cpu(p[3]),
		    (long long)be64_to_cpu(p[4]), (long long)be64_to_cpu(p[5]),
		    (long long)be64_to_cpu(p[6]), (long long)be64_to_cpu(p[7]));
	} else {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)t4_read_reg64(sc, reg),
		    (long long)t4_read_reg64(sc, reg + 8),
		    (long long)t4_read_reg64(sc, reg + 16),
		    (long long)t4_read_reg64(sc, reg + 24),
		    (long long)t4_read_reg64(sc, reg + 32),
		    (long long)t4_read_reg64(sc, reg + 40),
		    (long long)t4_read_reg64(sc, reg + 48),
		    (long long)t4_read_reg64(sc, reg + 56));
	}
}

/* t4_main.c */
extern int t4_ntxq;
extern int t4_nrxq;
extern int t4_intr_types;
extern int t4_tmr_idx;
extern int t4_pktc_idx;
extern unsigned int t4_qsize_rxq;
extern unsigned int t4_qsize_txq;
extern device_method_t cxgbe_methods[];

int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(struct port_info *);
void t4_os_link_changed(struct port_info *);
void t4_iterate(void (*)(struct adapter *, void *), void *);
void t4_init_devnames(struct adapter *);
void t4_add_adapter(struct adapter *);
void t4_aes_getdeckey(void *, const void *, unsigned int);
int t4_detach_common(device_t);
int t4_map_bars_0_and_4(struct adapter *);
int t4_map_bar_2(struct adapter *);
int t4_setup_intr_handlers(struct adapter *);
void t4_sysctls(struct adapter *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
void doom_vi(struct adapter *, struct vi_info *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_full_init(struct adapter *);
int adapter_full_uninit(struct adapter *);
uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
int vi_full_init(struct vi_info *);
int vi_full_uninit(struct vi_info *);
void vi_sysctls(struct vi_info *);
void vi_tick(void *);
int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
int alloc_atid_tab(struct tid_info *, int);
void free_atid_tab(struct tid_info *);
int alloc_atid(struct adapter *, void *);
void *lookup_atid(struct adapter *, int);
void free_atid(struct adapter *, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int cxgbe_media_change(struct ifnet *);
void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
bool t4_os_dump_cimla(struct adapter *, int, bool);
void t4_os_dump_devlog(struct adapter *);

#ifdef DEV_NETMAP
/* t4_netmap.c */
struct sge_nm_rxq;
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void service_nm_rxq(struct sge_nm_rxq *);
#endif

/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
#ifdef DEV_NETMAP
void t4_nm_intr(void *);
void t4_vi_intr(void *);
#endif
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
void t4_update_fl_bufsize(struct ifnet *);
struct mbuf *alloc_wr_mbuf(int, int);
int parse_pkt(struct adapter *, struct mbuf **);
void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
int tnl_cong(struct port_info *, int);
void t4_register_an_handler(an_handler_t);
void t4_register_fw_msg_handler(int, fw_msg_handler_t);
void t4_register_cpl_handler(int, cpl_handler_t);
void t4_register_shared_cpl_handler(int, cpl_handler_t, int);
#ifdef RATELIMIT
int ethofld_transmit(struct ifnet *, struct mbuf *);
void send_etid_flush_wr(struct cxgbe_snd_tag *);
#endif

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_sched.c */
int t4_set_sched_class(struct adapter *, struct t4_sched_params *);
int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *);
int t4_init_tx_sched(struct adapter *);
int t4_free_tx_sched(struct adapter *);
void t4_update_tx_sched(struct adapter *);
int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
void t4_release_cl_rl(struct adapter *, int, int);
int sysctl_tc(SYSCTL_HANDLER_ARGS);
int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
void t4_init_etid_table(struct adapter *);
void t4_free_etid_table(struct adapter *);
struct cxgbe_snd_tag *lookup_etid(struct adapter *, int);
int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
int cxgbe_snd_tag_modify(struct m_snd_tag *, union if_snd_tag_modify_params *);
int cxgbe_snd_tag_query(struct m_snd_tag *, union if_snd_tag_query_params *);
void cxgbe_snd_tag_free(struct m_snd_tag *);
void cxgbe_snd_tag_free_locked(struct cxgbe_snd_tag *);
#endif

/* t4_filter.c */
int get_filter_mode(struct adapter *, uint32_t *);
int set_filter_mode(struct adapter *, uint32_t);
int get_filter(struct adapter *, struct t4_filter *);
int set_filter(struct adapter *, struct t4_filter *);
int del_filter(struct adapter *, struct t4_filter *);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_ao_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}
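
/*
 * Typical work-request flow (a sketch; struct fw_foo_wr is a hypothetical
 * WR layout):
 *
 *	struct wrqe *wr;
 *	struct fw_foo_wr *f;
 *
 *	wr = alloc_wrqe(sizeof(*f), wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);
 *	f = wrtod(wr);
 *	... fill in *f ...
 *	t4_wrq_tx(sc, wr);	(the wrqe is consumed; the caller must not
 *				 free it afterwards)
 */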

static inline int
read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
    int len)
{

	return (rw_via_memwin(sc, idx, addr, val, len, 0));
}

static inline int
write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
    const uint32_t *val, int len)
{

	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
}
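
/*
 * Illustrative memory-window read (a sketch): fetch 64 bytes of adapter
 * memory at "addr" through memory window 0 into a local buffer.
 *
 *	uint32_t buf[16];
 *	int rc;
 *
 *	rc = read_via_memwin(sc, 0, addr, buf, sizeof(buf));
 *	if (rc != 0)
 *		return (rc);
 */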
#endif