/*-
 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <net/if_vlan_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <sys/sockio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <al_hal_common.h>
#include <al_hal_plat_services.h>
#include <al_hal_udma_config.h>
#include <al_hal_udma_iofic.h>
#include <al_hal_udma_debug.h>
#include <al_hal_eth.h>

#include "al_eth.h"
#include "al_init_eth_lm.h"
#include "arm/annapurna/alpine/alpine_serdes.h"

#include "miibus_if.h"

#define	device_printf_dbg(fmt, ...)	do {				\
	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
	    device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();}		\
	} while (0)
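/*
 * Illustrative usage (editorial note, not part of the original file): with
 * AL_DBG_LEVEL at or above AL_DBG_LEVEL_DBG, a call such as
 *	device_printf_dbg(adapter->dev, "rx queue %d\n", qid);
 * expands to a device_printf() bracketed by the debug lock; at lower debug
 * levels the constant-false condition lets the compiler drop the statement
 * from the hot path entirely.
 */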

MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");

/* move out to some pci header file */
#define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
#define	PCI_DEVICE_ID_AL_ETH		0x0001
#define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
#define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
#define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
#define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
#define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
#define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
#define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
#define	PCI_DEVICE_ID_AL_USB		0x0041

#define	MAC_ADDR_STR	"%02x:%02x:%02x:%02x:%02x:%02x"
#define	MAC_ADDR(addr)	addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]

#define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
#define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
#define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)

#define	AL_ETH_MAC_TABLE_DROP_IDX	(AL_ETH_FWD_MAC_NUM - 1)
#define	AL_ETH_MAC_TABLE_BROADCAST_IDX	(AL_ETH_MAC_TABLE_DROP_IDX - 1)

#define	AL_ETH_THASH_UDMA_SHIFT		0
#define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)

#define	AL_ETH_THASH_Q_SHIFT		4
#define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)
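/*
 * Editorial note, derived from the shift/mask pairs above: a thash table
 * entry encodes the target UDMA in bits 0-3 and the target queue in bits
 * 4-5, so steering to UDMA 0, queue 2 would be encoded as
 * (2 << AL_ETH_THASH_Q_SHIFT), i.e. 0x20.
 */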

/* the following defines should be moved to hal */
#define	AL_ETH_FSM_ENTRY_IPV4_TCP	0
#define	AL_ETH_FSM_ENTRY_IPV4_UDP	1
#define	AL_ETH_FSM_ENTRY_IPV6_TCP	2
#define	AL_ETH_FSM_ENTRY_IPV6_UDP	3
#define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP 4
#define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP 5

/* FSM DATA format */
#define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
#define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
#define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
#define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3

#define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)

#define	AL_ETH_FSM_DATA_DEFAULT_Q	0
#define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0

#define	AL_BR_SIZE	512
#define	AL_TSO_SIZE	65500
#define	AL_DEFAULT_MTU	1500

#define	CSUM_OFFLOAD	(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)

#define	AL_IP_ALIGNMENT_OFFSET	2
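/*
 * Editorial note: the 2-byte offset shifts the 14-byte Ethernet header so
 * that the IP header behind it lands on a 4-byte boundary, a common RX
 * alignment trick; that is assumed here to be the intent of this constant.
 */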

#define	SFP_I2C_ADDR		0x50

#define	AL_MASK_GROUP_A_INT	0x7
#define	AL_MASK_GROUP_B_INT	0xF
#define	AL_MASK_GROUP_C_INT	0xF
#define	AL_MASK_GROUP_D_INT	0xFFFFFFFF

#define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
#define	AL_EN_FORWARD_INTR	0x1FFFF
#define	AL_DIS_FORWARD_INTR	0

#define	AL_M2S_MASK_INIT	0x480
#define	AL_S2M_MASK_INIT	0x1E0
#define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)

#define	AL_10BASE_T_SPEED	10
#define	AL_100BASE_TX_SPEED	100
#define	AL_1000BASE_T_SPEED	1000

#define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
#define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
#define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))

/* helper functions */
static int al_is_device_supported(device_t);

static void al_eth_init_rings(struct al_eth_adapter *);
static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
int al_eth_fpga_write_pci_config(void *, int, uint32_t);
int al_eth_read_pci_config(void *, int, uint32_t *);
int al_eth_write_pci_config(void *, int, uint32_t);
void al_eth_irq_config(uint32_t *, uint32_t);
void al_eth_forward_int_config(uint32_t *, uint32_t);
static void al_eth_start_xmit(void *, int);
static void al_eth_rx_recv_work(void *, int);
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void al_eth_interrupts_mask(struct al_eth_adapter *);
static int al_eth_check_mtu(struct al_eth_adapter *, int);
static uint64_t al_get_counter(if_t, ift_counter);
static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
static int al_eth_board_params_init(struct al_eth_adapter *);
static int al_media_update(if_t);
static void al_media_status(if_t, struct ifmediareq *);
static int al_eth_function_reset(struct al_eth_adapter *);
static int al_eth_hw_init_adapter(struct al_eth_adapter *);
static void al_eth_serdes_init(struct al_eth_adapter *);
static void al_eth_lm_config(struct al_eth_adapter *);
static int al_eth_hw_init(struct al_eth_adapter *);

static void al_tick_stats(void *);

/* ifnet entry points */
static void al_init(void *);
static int al_mq_start(if_t, struct mbuf *);
static void al_qflush(if_t);
static int al_ioctl(if_t ifp, u_long, caddr_t);

/* bus entry points */
static int al_probe(device_t);
static int al_attach(device_t);
static int al_detach(device_t);
static int al_shutdown(device_t);

/* mii bus support routines */
static int al_miibus_readreg(device_t, int, int);
static int al_miibus_writereg(device_t, int, int, int);
static void al_miibus_statchg(device_t);
static void al_miibus_linkchg(device_t);

struct al_eth_adapter* g_adapters[16];
uint32_t g_adapters_count;

/* flag for napi-like mbuf processing, controlled from sysctl */
static int napi = 0;

static device_method_t al_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		al_probe),
	DEVMETHOD(device_attach,	al_attach),
	DEVMETHOD(device_detach,	al_detach),
	DEVMETHOD(device_shutdown,	al_shutdown),

	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
	{ 0, 0 }
};

static driver_t al_driver = {
	"al",
	al_methods,
	sizeof(struct al_eth_adapter),
};

DRIVER_MODULE(al, pci, al_driver, 0, 0);
DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);

static int
al_probe(device_t dev)
{
	if (al_is_device_supported(dev) != 0) {
		device_set_desc(dev, "al");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

static int
al_attach(device_t dev)
{
	struct al_eth_adapter *adapter;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	if_t ifp;
	uint32_t dev_id;
	uint32_t rev_id;
	int bar_udma;
	int bar_mac;
	int bar_ec;
	int err;

	err = 0;
	ifp = NULL;
	dev_id = rev_id = 0;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
	child = SYSCTL_CHILDREN(tree);

	if (g_adapters_count == 0) {
		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
	}
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->board_type = ALPINE_INTEGRATED;
	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
	    device_get_nameunit(dev));
	AL_RX_LOCK_INIT(adapter);

	g_adapters[g_adapters_count] = adapter;

	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_udma, RF_ACTIVE);
	if (adapter->udma_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for DMA.\n");
		err = ENOMEM;
		goto err_res_dma;
	}
	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
	    rman_get_bushandle(adapter->udma_res));
	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_mac, RF_ACTIVE);
	if (adapter->mac_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for MAC.\n");
		err = ENOMEM;
		goto err_res_mac;
	}
	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
	    rman_get_bushandle(adapter->mac_res));

	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
	    RF_ACTIVE);
	if (adapter->ec_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for EC.\n");
		err = ENOMEM;
		goto err_res_ec;
	}
	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
	    rman_get_bushandle(adapter->ec_res));

	adapter->netdev = ifp = if_alloc(IFT_ETHER);

	if_setsoftc(ifp, adapter);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if_setflags(ifp, if_getdrvflags(ifp));
	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
	if_settransmitfn(ifp, al_mq_start);
	if_setqflushfn(ifp, al_qflush);
	if_setioctlfn(ifp, al_ioctl);
	if_setinitfn(ifp, al_init);
	if_setgetcounterfn(ifp, al_get_counter);
	if_setmtu(ifp, AL_DEFAULT_MTU);

	adapter->if_flags = if_getflags(ifp);

	if_setcapabilities(ifp, if_getcapenable(ifp));

	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
	    IFCAP_LRO | IFCAP_JUMBO_MTU, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	adapter->id_number = g_adapters_count;

	if (adapter->board_type == ALPINE_INTEGRATED) {
		dev_id = pci_get_device(adapter->dev);
		rev_id = pci_get_revid(adapter->dev);
	} else {
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_DEVICE, &dev_id);
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_REVID, &rev_id);
	}

	adapter->dev_id = dev_id;
	adapter->rev_id = rev_id;

	/* set default ring sizes */
	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

	adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

	al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));

	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;

	err = al_eth_board_params_init(adapter);
	if (err != 0)
		goto err;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
		ifmedia_init(&adapter->media, IFM_IMASK,
		    al_media_update, al_media_status);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	}

	al_eth_function_reset(adapter);

	err = al_eth_hw_init_adapter(adapter);
	if (err != 0)
		goto err;

	al_eth_init_rings(adapter);
	g_adapters_count++;

	al_eth_lm_config(adapter);
	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

	ether_ifattach(ifp, adapter->mac_addr);
	if_setmtu(ifp, AL_DEFAULT_MTU);

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		al_eth_hw_init(adapter);

		/* Attach PHY(s) */
		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
		    MII_OFFSET_ANY, 0);
		if (err != 0) {
			device_printf(adapter->dev, "attaching PHYs failed\n");
			return (err);
		}

		adapter->mii = device_get_softc(adapter->miibus);
	}

	return (err);

err:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
	return (err);
}

static int
al_detach(device_t dev)
{
	struct al_eth_adapter *adapter;

	adapter = device_get_softc(dev);
	ether_ifdetach(adapter->netdev);

	mtx_destroy(&adapter->stats_mtx);
	mtx_destroy(&adapter->wd_mtx);

	al_eth_down(adapter);

	bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);

	return (0);
}

int
al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is the base address of the adapter */
	*val = al_reg_read32((void*)((u_long)handle + where));

	return (0);
}

int
al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is the base address of the adapter */
	al_reg_write32((void*)((u_long)handle + where), val);
	return (0);
}

int
al_eth_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is a pci_dev */
	*val = pci_read_config((device_t)handle, where, sizeof(*val));
	return (0);
}

int
al_eth_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is a pci_dev */
	pci_write_config((device_t)handle, where, val, sizeof(val));
	return (0);
}

void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32_relaxed(offset, value);
}

void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32(offset, value);
}

static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
	void __iomem *serdes_base;

	adapter->serdes_init = false;

	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
	if (serdes_base == NULL) {
		device_printf(adapter->dev, "serdes_base get failed!\n");
		return;
	}

	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
	    &adapter->serdes_obj);

	adapter->serdes_init = true;
}

static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *baddr, void **vaddr, uint32_t size)
{
	int ret;
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
	if (ret != 0) {
		device_printf(dev,
		    "failed to create bus tag, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate dmamem, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamap_load(*tag, *map, *vaddr,
	    size, al_dma_map_addr, baddr, 0);
	if (ret != 0) {
		device_printf(dev,
		    "failed to load dmamap, ret = %d\n", ret);
		return (ret);
	}

	return (0);
}
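/*
 * Minimal usage sketch (hypothetical, not taken from this driver): the tag,
 * map and buffer created above are released with al_dma_free_coherent()
 * below, e.g.
 *
 *	bus_dma_tag_t tag; bus_dmamap_t map; bus_addr_t pa; void *va;
 *	if (al_dma_alloc_coherent(dev, &tag, &map, &pa, &va, len) == 0)
 *		al_dma_free_coherent(tag, map, va);
 */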

static void
al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{

	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, vaddr, map);
	bus_dma_tag_destroy(tag);
}

static void
al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));

	memset(entry.mask, 0xff, sizeof(entry.mask));
	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0x00, sizeof(entry.addr));
	memset(entry.mask, 0x00, sizeof(entry.mask));
	entry.mask[0] |= 1;
	entry.addr[0] |= 1;

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t udma_mask)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0xff, sizeof(entry.addr));
	memset(entry.mask, 0xff, sizeof(entry.mask));

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = udma_mask;
	entry.filter = false;

	device_printf_dbg(adapter->dev,
	    "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
	    __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
    bool promiscuous)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	memset(entry.addr, 0x00, sizeof(entry.addr));
	memset(entry.mask, 0x00, sizeof(entry.mask));

	entry.rx_valid = true;
	entry.tx_valid = false;
	entry.udma_mask = (promiscuous) ? 1 : 0;
	entry.filter = (promiscuous) ? false : true;

	device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
	    __func__, (promiscuous) ? "enter" : "exit");

	al_eth_fwd_mac_table_set(&adapter->hal_adapter,
	    AL_ETH_MAC_TABLE_DROP_IDX, &entry);
}

static void
al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma, uint32_t queue)
{

	if (udma != 0)
		panic("only UDMA0 is supported");

	if (queue >= AL_ETH_NUM_QUEUES)
		panic("invalid queue number");

	al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
}

/*
 * Init FSM; no tunneling supported yet. If the packet is TCP/UDP over
 * IPv4/IPv6, use a 4-tuple hash.
 */
static void
al_eth_fsm_table_init(struct al_eth_adapter *adapter)
{
	uint32_t val;
	int i;

	for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
		uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
		switch (outer_type) {
		case AL_ETH_FSM_ENTRY_IPV4_TCP:
		case AL_ETH_FSM_ENTRY_IPV4_UDP:
		case AL_ETH_FSM_ENTRY_IPV6_TCP:
		case AL_ETH_FSM_ENTRY_IPV6_UDP:
			val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
			    AL_ETH_FSM_DATA_HASH_SEL;
			break;
		case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
		case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
			val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
			    AL_ETH_FSM_DATA_HASH_SEL;
			break;
		default:
			val = AL_ETH_FSM_DATA_DEFAULT_Q |
			    AL_ETH_FSM_DATA_DEFAULT_UDMA;
		}
		al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
	}
}
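/*
 * Worked example (derived from the defines above): for an entry whose outer
 * type is AL_ETH_FSM_ENTRY_IPV4_TCP, val becomes AL_ETH_FSM_DATA_OUTER_4_TUPLE
 * | AL_ETH_FSM_DATA_HASH_SEL (0x1 | 0x4 = 0x5), i.e. hash the outer 4-tuple
 * and, presumably, let the hash result select the destination queue.
 */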

static void
al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
    uint8_t idx)
{
	struct al_eth_fwd_mac_table_entry entry = { { 0 } };

	device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);

	al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static int
al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
{
	struct al_eth_adapter_params *params = &adapter->eth_hal_params;
	int rc;

	/* params->dev_id = adapter->dev_id; */
	params->rev_id = adapter->rev_id;
	params->udma_id = 0;
	params->enable_rx_parser = 1; /* enable rx epe parser */
	params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
	params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
	params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
	params->name = adapter->name;
	params->serdes_lane = adapter->serdes_lane;

	rc = al_eth_adapter_init(&adapter->hal_adapter, params);
	if (rc != 0)
		device_printf(adapter->dev, "%s failed at hal init!\n",
		    __func__);

	if ((adapter->board_type == ALPINE_NIC) ||
	    (adapter->board_type == ALPINE_FPGA_NIC)) {
		/* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */
		struct al_udma_gen_tgtid_conf conf;
		int i;
		for (i = 0; i < DMA_MAX_Q; i++) {
			conf.tx_q_conf[i].queue_en = AL_TRUE;
			conf.tx_q_conf[i].desc_en = AL_FALSE;
			conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
			conf.rx_q_conf[i].queue_en = AL_TRUE;
			conf.rx_q_conf[i].desc_en = AL_FALSE;
			conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
		}
		al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
	}

	return (rc);
}

static void
al_eth_lm_config(struct al_eth_adapter *adapter)
{
	struct al_eth_lm_init_params params = {0};

	params.adapter = &adapter->hal_adapter;
	params.serdes_obj = &adapter->serdes_obj;
	params.lane = adapter->serdes_lane;
	params.sfp_detection = adapter->sfp_detection_needed;
	if (adapter->sfp_detection_needed == true) {
		params.sfp_bus_id = adapter->i2c_adapter_id;
		params.sfp_i2c_addr = SFP_I2C_ADDR;
	}

	if (adapter->sfp_detection_needed == false) {
		switch (adapter->mac_mode) {
		case AL_ETH_MAC_MODE_10GbE_Serial:
			if ((adapter->lt_en != 0) && (adapter->an_en != 0))
				params.default_mode = AL_ETH_LM_MODE_10G_DA;
			else
				params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
			break;
		case AL_ETH_MAC_MODE_SGMII:
			params.default_mode = AL_ETH_LM_MODE_1G;
			break;
		default:
			params.default_mode = AL_ETH_LM_MODE_10G_DA;
		}
	} else
		params.default_mode = AL_ETH_LM_MODE_10G_DA;

	params.link_training = adapter->lt_en;
	params.rx_equal = true;
	params.static_values = !adapter->dont_override_serdes;
	params.i2c_context = adapter;
	params.kr_fec_enable = false;

	params.retimer_exist = adapter->retimer.exist;
	params.retimer_bus_id = adapter->retimer.bus_id;
	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
	params.retimer_channel = adapter->retimer.channel;

	al_eth_lm_init(&adapter->lm_context, &params);
}

static int
al_eth_board_params_init(struct al_eth_adapter *adapter)
{

	if (adapter->board_type == ALPINE_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else {
		struct al_eth_board_params params;
		int rc;

		adapter->auto_speed = false;

		rc = al_eth_board_params_get(adapter->mac_base, &params);
		if (rc != 0) {
			device_printf(adapter->dev,
			    "board info not available\n");
			return (-1);
		}

		adapter->phy_exist = params.phy_exist == true;
		adapter->phy_addr = params.phy_mdio_addr;
		adapter->an_en = params.autoneg_enable;
		adapter->lt_en = params.kr_lt_enable;
		adapter->serdes_grp = params.serdes_grp;
		adapter->serdes_lane = params.serdes_lane;
		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
		adapter->i2c_adapter_id = params.i2c_adapter_id;
		adapter->ref_clk_freq = params.ref_clk_freq;
		adapter->dont_override_serdes = params.dont_override_serdes;
		adapter->link_config.active_duplex = !params.half_duplex;
		adapter->link_config.autoneg = !params.an_disable;
		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
		adapter->retimer.exist = params.retimer_exist;
		adapter->retimer.bus_id = params.retimer_bus_id;
		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
		adapter->retimer.channel = params.retimer_channel;

		switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
			/* FALLTHROUGH */
		case AL_ETH_BOARD_1G_SPEED_1000M:
			adapter->link_config.active_speed = 1000;
			break;
		case AL_ETH_BOARD_1G_SPEED_100M:
			adapter->link_config.active_speed = 100;
			break;
		case AL_ETH_BOARD_1G_SPEED_10M:
			adapter->link_config.active_speed = 10;
			break;
		}

		switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
			/* FALLTHROUGH */
		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
			break;
		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
			break;
		}

		switch (params.media_type) {
		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
			if (params.sfp_plus_module_exist == true)
				/* Backward compatibility */
				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			else
				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;

			adapter->use_lm = false;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
			adapter->sfp_detection_needed = true;
			adapter->auto_speed = false;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
			adapter->sfp_detection_needed = true;
			adapter->auto_speed = true;
			adapter->mac_mode_set = false;
			adapter->use_lm = true;

			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			break;
		default:
			device_printf(adapter->dev,
			    "%s: unsupported media type %d\n",
			    __func__, params.media_type);
			return (-1);
		}

		device_printf(adapter->dev,
		    "Board info: phy exist %s. phy addr %d. mdio freq %u kHz. "
		    "SFP connected %s. media %d\n",
		    params.phy_exist ? "Yes" : "No",
		    params.phy_mdio_addr, adapter->mdio_freq,
		    params.sfp_plus_module_exist ? "Yes" : "No",
		    params.media_type);
	}

	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);

	return (0);
}

static int
al_eth_function_reset(struct al_eth_adapter *adapter)
{
	struct al_eth_board_params params;
	int rc;

	/* save board params so we can restore them after reset */
	al_eth_board_params_get(adapter->mac_base, &params);
	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
	if (adapter->board_type == ALPINE_INTEGRATED)
		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
		    &al_eth_write_pci_config,
		    adapter->dev, adapter->mac_base);
	else
		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
		    &al_eth_fpga_write_pci_config,
		    adapter->internal_pcie_base, adapter->mac_base);

	/* restore params */
	al_eth_board_params_set(adapter->mac_base, &params);
	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);

	return (rc);
}

static void
al_eth_init_rings(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct al_eth_ring *ring = &adapter->tx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
		    &ring->dma_q);
		ring->sw_count = adapter->tx_ring_count;
		ring->hw_count = adapter->tx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
		ring->unmask_val = ~(1 << i);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct al_eth_ring *ring = &adapter->rx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
		ring->sw_count = adapter->rx_ring_count;
		ring->hw_count = adapter->rx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
		ring->unmask_val = ~(1 << i);
	}
}
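/*
 * Editorial note: the unmask_reg_offset/unmask_val pair cached above is what
 * the TX/RX completion handlers later pass to al_eth_irq_config() to
 * re-enable this ring's interrupt once a processing pass has finished.
 */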

static void
al_init_locked(void *arg)
{
	struct al_eth_adapter *adapter = arg;
	if_t ifp = adapter->netdev;
	int rc = 0;

	al_eth_down(adapter);
	rc = al_eth_up(adapter);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	if (rc == 0)
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
}

static void
al_init(void *arg)
{
	struct al_eth_adapter *adapter = arg;

	al_init_locked(adapter);
}

static inline int
al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring,
    struct al_eth_rx_buffer *rx_info)
{
	struct al_buf *al_buf;
	bus_dma_segment_t segs[2];
	int error;
	int nsegs;

	if (rx_info->m != NULL)
		return (0);

	rx_info->data_size = adapter->rx_mbuf_sz;

	AL_RX_LOCK(adapter);

	/* Get mbuf using UMA allocator */
	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_info->data_size);
	AL_RX_UNLOCK(adapter);

	if (rx_info->m == NULL)
		return (ENOMEM);

	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;

	/* Map packets for DMA */
	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
		    error);
		m_freem(rx_info->m);
		rx_info->m = NULL;
		return (EFAULT);
	}

	al_buf = &rx_info->al_buf;
	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;

	return (0);
}

static int
al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
    unsigned int num)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	uint16_t next_to_use;
	unsigned int i;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		int rc;
		struct al_eth_rx_buffer *rx_info =
		    &rx_ring->rx_buffer_info[next_to_use];

		if (__predict_false(al_eth_alloc_rx_buf(adapter,
		    rx_ring, rx_info) < 0)) {
			device_printf(adapter->dev,
			    "failed to alloc buffer for rx queue %d\n", qid);
			break;
		}

		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
		if (__predict_false(rc)) {
			device_printf(adapter->dev,
			    "failed to add buffer for rx queue %d\n", qid);
			break;
		}

		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
	}

	if (__predict_false(i < num))
		device_printf(adapter->dev,
		    "refilled rx queue %d with %d pages only - available %d\n",
		    qid, i, al_udma_available_get(rx_ring->dma_q));

	if (__predict_true(i))
		al_eth_rx_buffer_action(rx_ring->dma_q, i);

	rx_ring->next_to_use = next_to_use;

	return (i);
}
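/*
 * Editorial note: the return value is the number of buffers actually posted;
 * al_eth_rx_recv_work() compares it against the requested count to detect a
 * short refill.
 */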

/*
 * al_eth_refill_all_rx_bufs - allocate Rx buffers for all queues
 * @adapter: board private structure
 */
static void
al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
}

static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
	unsigned int total_done;
	uint16_t next_to_clean;
	int qid = tx_ring->ring_id;

	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
	device_printf_dbg(tx_ring->dev,
	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
	next_to_clean = tx_ring->next_to_clean;

	while (total_done != 0) {
		struct al_eth_tx_buffer *tx_info;
		struct mbuf *mbuf;

		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
		/* stop if not all descriptors of the packet are completed */
		if (tx_info->tx_descs > total_done)
			break;

		mbuf = tx_info->m;

		tx_info->m = NULL;

		device_printf_dbg(tx_ring->dev,
		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);

		/* map is no longer required */
		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);

		m_freem(mbuf);
		total_done -= tx_info->tx_descs;
		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
	}

	tx_ring->next_to_clean = next_to_clean;

	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
	    qid, next_to_clean);

	/*
	 * need to make the ring's circular update visible to
	 * al_eth_start_xmit() before checking for netif_queue_stopped().
	 */
	al_smp_data_memory_barrier();
}

static void
al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
    struct al_eth_pkt *hal_pkt, struct mbuf *m)
{
	uint32_t mss = m->m_pkthdr.tso_segsz;
	struct ether_vlan_header *eh;
	uint16_t etype;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th = NULL;
	int ehdrlen, ip_hlen = 0;
	uint8_t ipproto = 0;
	uint32_t offload = 0;

	if (mss != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
		offload = 1;

	if (offload != 0) {
		struct al_eth_meta_data *meta = &tx_ring->hal_meta;

		if (mss != 0)
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
			    AL_ETH_TX_FLAGS_L4_CSUM);
		else
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);

		/*
		 * Determine where frame payload starts.
		 * Jump over vlan headers if already present,
		 * helpful for QinQ too.
		 */
		eh = mtod(m, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehdrlen = ETHER_HDR_LEN;
		}

		switch (etype) {
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(m->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
			if (mss != 0)
				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
#endif /* INET */
#ifdef INET6
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
			ip_hlen = sizeof(struct ip6_hdr);
			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
			ipproto = ip6->ip6_nxt;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
#endif /* INET6 */
		default:
			break;
		}

		meta->words_valid = 4;
		meta->l3_header_len = ip_hlen;
		meta->l3_header_offset = ehdrlen;
		if (th != NULL)
			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
		meta->mss_idx_sel = 0; /* check how to select MSS */
		meta->mss_val = mss;
		hal_pkt->meta = meta;
	} else
		hal_pkt->meta = NULL;
}
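/*
 * Illustrative example (not from the original source): a TSO'd IPv4/TCP mbuf
 * with tso_segsz = 1448 takes the mss != 0 paths above, so hal_pkt->flags
 * ends up with AL_ETH_TX_FLAGS_TSO | AL_ETH_TX_FLAGS_L4_CSUM |
 * AL_ETH_TX_FLAGS_IPV4_L3_CSUM and meta->mss_val is 1448.
 */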

#define	XMIT_QUEUE_TIMEOUT	100

static void
al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
{
	struct al_eth_tx_buffer *tx_info;
	int error;
	int nsegs, a;
	uint16_t next_to_use;
	bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
	struct al_eth_pkt *hal_pkt;
	struct al_buf *al_buf;
	bool remap;

	/* Check if queue is ready */
	if (unlikely(tx_ring->stall != 0)) {
		for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
			if (al_udma_available_get(tx_ring->dma_q) >=
			    (AL_ETH_DEFAULT_TX_HW_DESCS -
			    AL_ETH_TX_WAKEUP_THRESH)) {
				tx_ring->stall = 0;
				break;
			}
			pause("stall", 1);
		}
		if (a == XMIT_QUEUE_TIMEOUT) {
			device_printf(tx_ring->dev,
			    "timeout waiting for queue %d ready!\n",
			    tx_ring->ring_id);
			return;
		} else {
			device_printf_dbg(tx_ring->dev,
			    "queue %d is ready!\n", tx_ring->ring_id);
		}
	}

	next_to_use = tx_ring->next_to_use;
	tx_info = &tx_ring->tx_buffer_info[next_to_use];
	tx_info->m = m;
	hal_pkt = &tx_info->hal_pkt;

	if (m == NULL) {
		device_printf(tx_ring->dev, "mbuf is NULL\n");
		return;
	}

	remap = true;
	/* Map packets for DMA */
retry:
	error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		struct mbuf *m_new;

		if (error == EFBIG) {
			/* Try it again? - one try */
			if (remap == true) {
				remap = false;
				m_new = m_defrag(m, M_NOWAIT);
				if (m_new == NULL) {
					device_printf(tx_ring->dev,
					    "failed to defrag mbuf\n");
					goto exit;
				}
				m = m_new;
				goto retry;
			} else {
				device_printf(tx_ring->dev,
				    "failed to map mbuf, error %d\n", error);
				goto exit;
			}
		} else {
			device_printf(tx_ring->dev,
			    "failed to map mbuf, error %d\n", error);
			goto exit;
		}
	}

	/* set flags and meta data */
	hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
	al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);

	al_buf = hal_pkt->bufs;
	for (a = 0; a < nsegs; a++) {
		al_buf->addr = segs[a].ds_addr;
		al_buf->len = segs[a].ds_len;

		al_buf++;
	}

	hal_pkt->num_of_bufs = nsegs;

	/* prepare the packet's descriptors to dma engine */
	tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);

	if (tx_info->tx_descs == 0)
		goto exit;

	/*
	 * stop the queue when no more space available, the packet can have up
	 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
	 */
	if (unlikely(al_udma_available_get(tx_ring->dma_q) <
	    (AL_ETH_PKT_MAX_BUFS + 2))) {
		tx_ring->stall = 1;
		device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
		    tx_ring->ring_id);
		al_data_memory_barrier();
	}

	tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);

	/* trigger the dma engine */
	al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
	return;

exit:
	m_freem(m);
}

static void
al_eth_tx_cmpl_work(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;

	if (napi != 0) {
		tx_ring->cmpl_is_running = 1;
		al_data_memory_barrier();
	}

	al_eth_tx_do_cleanup(tx_ring);

	if (napi != 0) {
		tx_ring->cmpl_is_running = 0;
		al_data_memory_barrier();
	}
	/* all work done, enable IRQs */
	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
}

static int
al_eth_tx_cmlp_irq_filter(void *arg)
{
	struct al_eth_ring *tx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
	    tx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi != 0 && tx_ring->cmpl_is_running == 0))
		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

static int
al_eth_rx_recv_irq_filter(void *arg)
{
	struct al_eth_ring *rx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
	    rx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi != 0 && rx_ring->enqueue_is_running == 0))
		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

/*
 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @hal_pkt: HAL structure for the packet
 * @mbuf: mbuf currently being received and modified
 */
static inline void
al_eth_rx_checksum(struct al_eth_adapter *adapter,
    struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
{

	/* if IPv4 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv4 header checksum error\n");
		return;
	}

	/* if IPv6 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv6 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
	    (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");

			/* TCP/UDP checksum error */
			mbuf->m_pkthdr.csum_flags = 0;
		} else {
			device_printf_dbg(adapter->dev, "rx checksum correct\n");

			/* IP Checksum Good */
			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
	}
}

static struct mbuf*
al_eth_rx_mbuf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
    unsigned int descs, uint16_t *next_to_clean)
{
	struct mbuf *mbuf;
	struct al_eth_rx_buffer *rx_info =
	    &rx_ring->rx_buffer_info[*next_to_clean];
	unsigned int len;

	len = hal_pkt->bufs[0].len;
	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
	    rx_info->m);

	if (rx_info->m == NULL) {
		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);
		return (NULL);
	}

	mbuf = rx_info->m;
	mbuf->m_pkthdr.len = len;
	mbuf->m_len = len;
	mbuf->m_pkthdr.rcvif = rx_ring->netdev;
	mbuf->m_flags |= M_PKTHDR;

	if (len <= adapter->small_copy_len) {
		struct mbuf *smbuf;
		device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);

		AL_RX_LOCK(adapter);
		smbuf = m_gethdr(M_NOWAIT, MT_DATA);
		AL_RX_UNLOCK(adapter);
		if (__predict_false(smbuf == NULL)) {
			device_printf(adapter->dev, "smbuf is NULL\n");
			return (NULL);
		}

		smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
		memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);

		smbuf->m_len = len;
		smbuf->m_pkthdr.rcvif = rx_ring->netdev;

		/* first desc of a non-ps chain */
		smbuf->m_flags |= M_PKTHDR;
		smbuf->m_pkthdr.len = smbuf->m_len;

		*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
		    *next_to_clean);

		return (smbuf);
	}
	mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;

	/* Unmap the buffer */
	bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);

	rx_info->m = NULL;
	*next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);

	return (mbuf);
}

static void
al_eth_rx_recv_work(void *arg, int pending)
{
	struct al_eth_ring *rx_ring = arg;
	struct mbuf *mbuf;
	unsigned int qid = rx_ring->ring_id;
	struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint32_t refill_required;
	uint32_t refill_actual;
	uint32_t do_if_input;

	if (napi != 0) {
		rx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	do {
		unsigned int descs;

		descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
		if (unlikely(descs == 0))
			break;

		device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
		    "from hal. descs %d\n", qid, descs);
		device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
		    "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
		    hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);

		/* ignore if detected dma or eth controller errors */
		if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
		    AL_UDMA_CDESC_ERROR)) != 0) {
			device_printf(rx_ring->dev, "receive packet with error. "
			    "flags = 0x%x\n", hal_pkt->flags);
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			continue;
		}

		/* allocate mbuf and fill it */
		mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
		    &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(mbuf == NULL)) {
			next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
			    next_to_clean, descs);
			break;
		}

		if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
		    if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
			al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
		}

		mbuf->m_pkthdr.flowid = qid;
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);

		/*
		 * LRO is only for IP/TCP packets whose TCP checksum has been
		 * verified by hardware.
		 */
		do_if_input = 1;
		if ((rx_ring->lro_enabled != 0) &&
		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
		    hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
			/*
			 * Send to the stack if:
			 *  - LRO not enabled, or
			 *  - no LRO resources, or
			 *  - lro enqueue fails
			 */
			if (rx_ring->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
					do_if_input = 0;
			}
		}

		if (do_if_input)
			if_input(rx_ring->netdev, mbuf);

	} while (1);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = al_udma_available_get(rx_ring->dma_q);
	refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
	    refill_required);

	if (unlikely(refill_actual < refill_required)) {
		device_printf_dbg(rx_ring->dev,
		    "%s: not filling rx queue %d\n", __func__, qid);
	}

	tcp_lro_flush_all(&rx_ring->lro);

	if (napi != 0) {
		rx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
	}
	/* unmask irq */
	al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
}

static void
al_eth_start_xmit(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;
	struct mbuf *mbuf;

	if (napi != 0) {
		tx_ring->enqueue_is_running = 1;
		al_data_memory_barrier();
	}

	while (1) {
		mtx_lock(&tx_ring->br_mtx);
		mbuf = drbr_dequeue(NULL, tx_ring->br);
		mtx_unlock(&tx_ring->br_mtx);

		if (mbuf == NULL)
			break;

		al_eth_xmit_mbuf(tx_ring, mbuf);
	}

	if (napi != 0) {
		tx_ring->enqueue_is_running = 0;
		al_data_memory_barrier();
		while (1) {
			mtx_lock(&tx_ring->br_mtx);
			mbuf = drbr_dequeue(NULL, tx_ring->br);
			mtx_unlock(&tx_ring->br_mtx);
			if (mbuf == NULL)
				break;
			al_eth_xmit_mbuf(tx_ring, mbuf);
		}
	}
}

static int
al_mq_start(if_t ifp, struct mbuf *m)
{
	struct al_eth_adapter *adapter = if_getsoftc(ifp);
	struct al_eth_ring *tx_ring;
	int i;
	int ret;

	/* Which queue to use */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % adapter->num_tx_queues;
	else
		i = curcpu % adapter->num_tx_queues;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		return (EFAULT);
	}

	tx_ring = &adapter->tx_ring[i];

	device_printf_dbg(adapter->dev, "dbg start() - assuming link is active, "
	    "sending packet to queue %d\n", i);

	ret = drbr_enqueue(ifp, tx_ring->br, m);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	return (ret);
}

static void
al_qflush(if_t ifp)
{

	/* unused */
}

1763 static inline void
al_eth_flow_ctrl_init(struct al_eth_adapter * adapter)1764 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1765 {
1766 uint8_t default_flow_ctrl;
1767
1768 default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1769 default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1770
1771 adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1772 }
1773
1774 static int
al_eth_flow_ctrl_config(struct al_eth_adapter * adapter)1775 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1776 {
1777 struct al_eth_flow_control_params *flow_ctrl_params;
1778 uint8_t active = adapter->link_config.flow_ctrl_active;
1779 int i;
1780
1781 flow_ctrl_params = &adapter->flow_ctrl_params;
1782
1783 flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1784 flow_ctrl_params->obay_enable =
1785 ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1786 flow_ctrl_params->gen_enable =
1787 ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1788
1789 flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1790 flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1791 flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1792 flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1793
1794 	/* map priority to queue bitmask, queue id = priority/2 */
1795 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1796 flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);
1797
1798 al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1799
1800 return (0);
1801 }
1802
1803 static void
1804 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1805 {
1806
1807 /*
1808 * change the active configuration to the default / force by ethtool
1809 * and call to configure
1810 */
1811 adapter->link_config.flow_ctrl_active =
1812 adapter->link_config.flow_ctrl_supported;
1813
1814 al_eth_flow_ctrl_config(adapter);
1815 }
1816
1817 static void
1818 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1819 {
1820
1821 adapter->link_config.flow_ctrl_active = 0;
1822 al_eth_flow_ctrl_config(adapter);
1823 }
1824
1825 static int
1826 al_eth_hw_init(struct al_eth_adapter *adapter)
1827 {
1828 int rc;
1829
1830 rc = al_eth_hw_init_adapter(adapter);
1831 if (rc != 0)
1832 return (rc);
1833
1834 rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1835 if (rc < 0) {
1836 device_printf(adapter->dev, "%s failed to configure mac!\n",
1837 __func__);
1838 return (rc);
1839 }
1840
1841 if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1842 (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1843 adapter->phy_exist == false)) {
1844 rc = al_eth_mac_link_config(&adapter->hal_adapter,
1845 adapter->link_config.force_1000_base_x,
1846 adapter->link_config.autoneg,
1847 adapter->link_config.active_speed,
1848 adapter->link_config.active_duplex);
1849 if (rc != 0) {
1850 device_printf(adapter->dev,
1851 "%s failed to configure link parameters!\n",
1852 __func__);
1853 return (rc);
1854 }
1855 }
1856
1857 rc = al_eth_mdio_config(&adapter->hal_adapter,
1858 AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1859 adapter->ref_clk_freq, adapter->mdio_freq);
1860 if (rc != 0) {
1861 device_printf(adapter->dev, "%s failed at mdio config!\n",
1862 __func__);
1863 return (rc);
1864 }
1865
1866 al_eth_flow_ctrl_init(adapter);
1867
1868 return (rc);
1869 }
1870
1871 static int
1872 al_eth_hw_stop(struct al_eth_adapter *adapter)
1873 {
1874
1875 al_eth_mac_stop(&adapter->hal_adapter);
1876
1877 	/*
1878 	 * wait until pending rx packets are written and the UDMA becomes
1879 	 * idle; the MAC has a ~10KB fifo, so 10us should be enough time
1880 	 * for the UDMA to write it to memory
1881 	 */
1882 DELAY(10);
1883
1884 al_eth_adapter_stop(&adapter->hal_adapter);
1885
1886 adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1887
1888 	/* disable flow ctrl to avoid pause packets */
1889 al_eth_flow_ctrl_disable(adapter);
1890
1891 return (0);
1892 }
1893
1894 /*
1895 * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1896 * @irq: interrupt number
1897 * @data: pointer to a network interface device structure
1898 */
1899 static int
1900 al_eth_intr_intx_all(void *data)
1901 {
1902 struct al_eth_adapter *adapter = data;
1903
1904 struct unit_regs __iomem *regs_base =
1905 (struct unit_regs __iomem *)adapter->udma_base;
1906 uint32_t reg;
1907
1908 reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1909 AL_INT_GROUP_A);
1910 if (likely(reg))
1911 device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1912 __func__, reg);
1913
1914 if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1915 struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1916 uint32_t cause_d = al_udma_iofic_read_cause(regs_base,
1917 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1918
1919 sec_ints_base =
1920 		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1921 if (cause_d != 0) {
1922 device_printf_dbg(adapter->dev,
1923 "got interrupt from group D. cause %x\n", cause_d);
1924
1925 cause_d = al_iofic_read_cause(sec_ints_base,
1926 AL_INT_GROUP_A);
1927 device_printf(adapter->dev,
1928 "secondary A cause %x\n", cause_d);
1929
1930 cause_d = al_iofic_read_cause(sec_ints_base,
1931 AL_INT_GROUP_B);
1932
1933 device_printf_dbg(adapter->dev,
1934 "secondary B cause %x\n", cause_d);
1935 }
1936 }
1937 	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1938 uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1939 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1940 int qid;
1941 device_printf_dbg(adapter->dev, "secondary B cause %x\n",
1942 cause_b);
1943 for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1944 if (cause_b & (1 << qid)) {
1945 /* mask */
1946 al_udma_iofic_mask(
1947 (struct unit_regs __iomem *)adapter->udma_base,
1948 AL_UDMA_IOFIC_LEVEL_PRIMARY,
1949 AL_INT_GROUP_B, 1 << qid);
1950 }
1951 }
1952 }
1953 if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1954 uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1955 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1956 int qid;
1957 device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c);
1958 for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1959 if ((cause_c & (1 << qid)) != 0) {
1960 al_udma_iofic_mask(
1961 (struct unit_regs __iomem *)adapter->udma_base,
1962 AL_UDMA_IOFIC_LEVEL_PRIMARY,
1963 AL_INT_GROUP_C, 1 << qid);
1964 }
1965 }
1966 }
1967
1968 al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1969
1970 return (0);
1971 }
1972
1973 static int
1974 al_eth_intr_msix_all(void *data)
1975 {
1976 struct al_eth_adapter *adapter = data;
1977
1978 device_printf_dbg(adapter->dev, "%s\n", __func__);
1979 return (0);
1980 }
1981
1982 static int
1983 al_eth_intr_msix_mgmt(void *data)
1984 {
1985 struct al_eth_adapter *adapter = data;
1986
1987 device_printf_dbg(adapter->dev, "%s\n", __func__);
1988 return (0);
1989 }
1990
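/*
 * Build the MSI-X entry table and allocate the vectors. The layout is
 * fixed: entry 2 is the management (group A) interrupt, rx completion
 * queues start at entry 3 and tx completion queues start at entry
 * 3 + AL_ETH_MAX_HW_QUEUES. Entries 0 and 1 are unused, hence the
 * "+ 2" when talking to pci_alloc_msix().
 */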
1991 static int
1992 al_eth_enable_msix(struct al_eth_adapter *adapter)
1993 {
1994 int i, msix_vecs, rc, count;
1995
1996 device_printf_dbg(adapter->dev, "%s\n", __func__);
1997 msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
1998
1999 device_printf_dbg(adapter->dev,
2000 "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2001
2002 adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
2003 M_IFAL, M_ZERO | M_WAITOK);
2004 	/* management vector (GROUP_A) @2 */
2005 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2006 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2007
2008 /* rx queues start @3 */
2009 for (i = 0; i < adapter->num_rx_queues; i++) {
2010 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2011
2012 adapter->msix_entries[irq_idx].entry = 3 + i;
2013 adapter->msix_entries[irq_idx].vector = 0;
2014 }
2015 /* tx queues start @7 */
2016 for (i = 0; i < adapter->num_tx_queues; i++) {
2017 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2018
2019 adapter->msix_entries[irq_idx].entry = 3 +
2020 AL_ETH_MAX_HW_QUEUES + i;
2021 adapter->msix_entries[irq_idx].vector = 0;
2022 }
2023
2024 count = msix_vecs + 2; /* entries start from 2 */
2025 rc = pci_alloc_msix(adapter->dev, &count);
2026
2027 if (rc != 0) {
2028 device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2029 "vectors %d\n", msix_vecs+2);
2030 device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2031 goto msix_entries_exit;
2032 }
2033
2034 if (count != msix_vecs + 2) {
2035 device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2036 "vectors %d, allocated %d\n", msix_vecs+2, count);
2037 rc = ENOSPC;
2038 goto msix_entries_exit;
2039 }
2040
2041 for (i = 0; i < msix_vecs; i++)
2042 adapter->msix_entries[i].vector = 2 + 1 + i;
2043
2044 device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2045 " vectors %d\n", msix_vecs);
2046
2047 adapter->msix_vecs = msix_vecs;
2048 adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2049 goto exit;
2050
2051 msix_entries_exit:
2052 adapter->msix_vecs = 0;
2053 free(adapter->msix_entries, M_IFAL);
2054 adapter->msix_entries = NULL;
2055
2056 exit:
2057 return (rc);
2058 }
2059
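/*
 * Fill the irq table according to how many vectors were obtained:
 * a single shared INTx handler, a single MSI-X handler, or one
 * management vector plus one vector per rx and tx completion queue.
 */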
2060 static int
2061 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2062 {
2063 int i, rc;
2064
2065 rc = al_eth_enable_msix(adapter);
2066 if (rc != 0) {
2067 device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2068 return (rc);
2069 }
2070
2071 adapter->irq_vecs = max(1, adapter->msix_vecs);
2072 /* single INTX mode */
2073 if (adapter->msix_vecs == 0) {
2074 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2075 AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2076 device_get_name(adapter->dev));
2077 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2078 al_eth_intr_intx_all;
2079 /* IRQ vector will be resolved from device resources */
2080 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2081 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2082
2083 device_printf(adapter->dev, "%s and vector %d \n", __func__,
2084 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2085
2086 return (0);
2087 }
2088 /* single MSI-X mode */
2089 if (adapter->msix_vecs == 1) {
2090 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2091 AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2092 device_get_name(adapter->dev));
2093 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2094 al_eth_intr_msix_all;
2095 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2096 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2097 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2098
2099 return (0);
2100 }
2101 /* MSI-X per queue */
2102 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2103 "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2104 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2105
2106 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2107 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2108 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2109
2110 for (i = 0; i < adapter->num_rx_queues; i++) {
2111 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2112
2113 snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2114 "al-eth-rx-comp-%d@pci:%s", i,
2115 device_get_name(adapter->dev));
2116 adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2117 adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2118 adapter->irq_tbl[irq_idx].vector =
2119 adapter->msix_entries[irq_idx].vector;
2120 }
2121
2122 for (i = 0; i < adapter->num_tx_queues; i++) {
2123 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2124
2125 snprintf(adapter->irq_tbl[irq_idx].name,
2126 AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2127 device_get_name(adapter->dev));
2128 adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2129 adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2130 adapter->irq_tbl[irq_idx].vector =
2131 adapter->msix_entries[irq_idx].vector;
2132 }
2133
2134 return (0);
2135 }
2136
2137 static void
2138 __al_eth_free_irq(struct al_eth_adapter *adapter)
2139 {
2140 struct al_eth_irq *irq;
2141 int i, rc;
2142
2143 for (i = 0; i < adapter->irq_vecs; i++) {
2144 irq = &adapter->irq_tbl[i];
2145 if (irq->requested != 0) {
2146 device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2147 irq->vector);
2148 rc = bus_teardown_intr(adapter->dev, irq->res,
2149 irq->cookie);
2150 if (rc != 0)
2151 device_printf(adapter->dev, "failed to tear "
2152 "down irq: %d\n", irq->vector);
2153 }
2154 irq->requested = 0;
2155 }
2156 }
2157
2158 static void
2159 al_eth_free_irq(struct al_eth_adapter *adapter)
2160 {
2161 struct al_eth_irq *irq;
2162 int i, rc;
2163 #ifdef CONFIG_RFS_ACCEL
2164 if (adapter->msix_vecs >= 1) {
2165 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2166 adapter->netdev->rx_cpu_rmap = NULL;
2167 }
2168 #endif
2169
2170 __al_eth_free_irq(adapter);
2171
2172 for (i = 0; i < adapter->irq_vecs; i++) {
2173 irq = &adapter->irq_tbl[i];
2174 if (irq->res == NULL)
2175 continue;
2176 device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2177 irq->vector);
2178 rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2179 irq->res);
2180 irq->res = NULL;
2181 if (rc != 0)
2182 device_printf(adapter->dev, "dev has no parent while "
2183 "releasing res for irq: %d\n", irq->vector);
2184 }
2185
2186 pci_release_msi(adapter->dev);
2187
2188 adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2189
2190 adapter->msix_vecs = 0;
2191 free(adapter->msix_entries, M_IFAL);
2192 adapter->msix_entries = NULL;
2193 }
2194
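/*
 * Allocate and wire the bus IRQ resources described by the irq table.
 * INTx resources are marked shareable; MSI-X vectors are exclusive.
 * On failure, already-installed handlers and resources are unwound.
 */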
2195 static int
2196 al_eth_request_irq(struct al_eth_adapter *adapter)
2197 {
2198 unsigned long flags;
2199 struct al_eth_irq *irq;
2200 int rc = 0, i, v;
2201
2202 if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2203 flags = RF_ACTIVE;
2204 else
2205 flags = RF_ACTIVE | RF_SHAREABLE;
2206
2207 for (i = 0; i < adapter->irq_vecs; i++) {
2208 irq = &adapter->irq_tbl[i];
2209
2210 if (irq->requested != 0)
2211 continue;
2212
2213 irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2214 &irq->vector, flags);
2215 if (irq->res == NULL) {
2216 device_printf(adapter->dev, "could not allocate "
2217 "irq vector=%d\n", irq->vector);
2218 rc = ENXIO;
2219 goto exit_res;
2220 }
2221
2222 if ((rc = bus_setup_intr(adapter->dev, irq->res,
2223 INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2224 NULL, irq->data, &irq->cookie)) != 0) {
2225 device_printf(adapter->dev, "failed to register "
2226 "interrupt handler for irq %ju: %d\n",
2227 (uintmax_t)rman_get_start(irq->res), rc);
2228 goto exit_intr;
2229 }
2230 irq->requested = 1;
2231 }
2232 goto exit;
2233
2234 exit_intr:
2235 	v = i; /* unwind entries 0 .. i-1; entry i is the one that failed */
2236 	while (v-- > 0) {
2237 int bti;
2238 irq = &adapter->irq_tbl[v];
2239 bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2240 if (bti != 0) {
2241 device_printf(adapter->dev, "failed to tear "
2242 "down irq: %d\n", irq->vector);
2243 }
2244
2245 irq->requested = 0;
2246 device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2247 irq->vector);
2248 }
2249
2250 exit_res:
2251 	v = i; /* unwind entries 0 .. i-1; entry i is the one that failed */
2252 	while (v-- > 0) {
2253 int brr;
2254 irq = &adapter->irq_tbl[v];
2255 device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2256 " for irq %d\n", irq->vector);
2257 brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2258 irq->vector, irq->res);
2259 if (brr != 0)
2260 device_printf(adapter->dev, "dev has no parent while "
2261 "releasing res for irq: %d\n", irq->vector);
2262 irq->res = NULL;
2263 }
2264
2265 exit:
2266 return (rc);
2267 }
2268
2269 /**
2270 * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2271 * @adapter: network interface device structure
2272 * @qid: queue index
2273 *
2274 * Return 0 on success, negative on failure
2275 **/
2276 static int
2277 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2278 {
2279 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2280 device_t dev = tx_ring->dev;
2281 struct al_udma_q_params *q_params = &tx_ring->q_params;
2282 int size;
2283 int ret;
2284
2285 if (adapter->up)
2286 return (0);
2287
2288 size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2289
2290 tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2291 tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2292 q_params->size = tx_ring->hw_count;
2293
2294 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2295 (bus_dmamap_t *)&q_params->desc_phy_base_map,
2296 (bus_addr_t *)&q_params->desc_phy_base,
2297 (void**)&q_params->desc_base, tx_ring->descs_size);
2298 if (ret != 0) {
2299 device_printf(dev, "failed to al_dma_alloc_coherent,"
2300 " ret = %d\n", ret);
2301 return (ENOMEM);
2302 }
2303
2304 if (q_params->desc_base == NULL)
2305 return (ENOMEM);
2306
2307 device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2308
2309 /* Allocate Ring Queue */
2310 mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2311 tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2312 &tx_ring->br_mtx);
2313
2314 /* Allocate taskqueues */
2315 TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2316 tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2317 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2318 taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2319 device_get_nameunit(adapter->dev));
2320 TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2321 tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2322 taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2323 taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2324 device_get_nameunit(adapter->dev));
2325
2326 /* Setup DMA descriptor areas. */
2327 ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2328 1, 0, /* alignment, bounds */
2329 BUS_SPACE_MAXADDR, /* lowaddr */
2330 BUS_SPACE_MAXADDR, /* highaddr */
2331 NULL, NULL, /* filter, filterarg */
2332 AL_TSO_SIZE, /* maxsize */
2333 AL_ETH_PKT_MAX_BUFS, /* nsegments */
2334 PAGE_SIZE, /* maxsegsize */
2335 0, /* flags */
2336 NULL, /* lockfunc */
2337 NULL, /* lockfuncarg */
2338 &tx_ring->dma_buf_tag);
2339
2340 if (ret != 0) {
2341 device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n",
2342 ret);
2343 return (ret);
2344 }
2345
2346 for (size = 0; size < tx_ring->sw_count; size++) {
2347 ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2348 &tx_ring->tx_buffer_info[size].dma_map);
2349 if (ret != 0) {
2350 device_printf(dev, "Unable to map DMA TX "
2351 "buffer memory [iter=%d]\n", size);
2352 return (ret);
2353 }
2354 }
2355
2356 /* completion queue not used for tx */
2357 q_params->cdesc_base = NULL;
2358 /* size in bytes of the udma completion ring descriptor */
2359 q_params->cdesc_size = 8;
2360 tx_ring->next_to_use = 0;
2361 tx_ring->next_to_clean = 0;
2362
2363 return (0);
2364 }
2365
2366 /*
2367 * al_eth_free_tx_resources - Free Tx Resources per Queue
2368 * @adapter: network interface device structure
2369 * @qid: queue index
2370 *
2371 * Free all transmit software resources
2372 */
2373 static void
2374 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2375 {
2376 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2377 struct al_udma_q_params *q_params = &tx_ring->q_params;
2378 int size;
2379
2380 /* At this point interrupts' handlers must be deactivated */
2381 while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2382 taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2383
2384 taskqueue_free(tx_ring->cmpl_tq);
2385 while (taskqueue_cancel(tx_ring->enqueue_tq,
2386 &tx_ring->enqueue_task, NULL)) {
2387 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2388 }
2389
2390 taskqueue_free(tx_ring->enqueue_tq);
2391
2392 if (tx_ring->br != NULL) {
2393 drbr_flush(adapter->netdev, tx_ring->br);
2394 buf_ring_free(tx_ring->br, M_DEVBUF);
2395 }
2396
2397 for (size = 0; size < tx_ring->sw_count; size++) {
2398 m_freem(tx_ring->tx_buffer_info[size].m);
2399 tx_ring->tx_buffer_info[size].m = NULL;
2400
2401 bus_dmamap_unload(tx_ring->dma_buf_tag,
2402 tx_ring->tx_buffer_info[size].dma_map);
2403 bus_dmamap_destroy(tx_ring->dma_buf_tag,
2404 tx_ring->tx_buffer_info[size].dma_map);
2405 }
2406 bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2407
2408 free(tx_ring->tx_buffer_info, M_IFAL);
2409 tx_ring->tx_buffer_info = NULL;
2410
2411 mtx_destroy(&tx_ring->br_mtx);
2412
2413 /* if not set, then don't free */
2414 if (q_params->desc_base == NULL)
2415 return;
2416
2417 al_dma_free_coherent(q_params->desc_phy_base_tag,
2418 q_params->desc_phy_base_map, q_params->desc_base);
2419
2420 q_params->desc_base = NULL;
2421 }
2422
2423 /*
2424 * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2425 * @adapter: board private structure
2426 *
2427 * Free all transmit software resources
2428 */
2429 static void
2430 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2431 {
2432 int i;
2433
2434 for (i = 0; i < adapter->num_tx_queues; i++)
2435 if (adapter->tx_ring[i].q_params.desc_base)
2436 al_eth_free_tx_resources(adapter, i);
2437 }
2438
2439 /*
2440 * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2441 * @adapter: network interface device structure
2442 * @qid: queue index
2443 *
2444 * Returns 0 on success, negative on failure
2445 */
2446 static int
2447 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2448 {
2449 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2450 device_t dev = rx_ring->dev;
2451 struct al_udma_q_params *q_params = &rx_ring->q_params;
2452 int size;
2453 int ret;
2454
2455 size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2456
2457 	/* alloc an extra element so in the rx path we can always prefetch rx_info + 1 */
2458 	size += sizeof(struct al_eth_rx_buffer);
2459
2460 rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2461 rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2462 q_params->size = rx_ring->hw_count;
2463
2464 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2465 &q_params->desc_phy_base_map,
2466 (bus_addr_t *)&q_params->desc_phy_base,
2467 (void**)&q_params->desc_base, rx_ring->descs_size);
2468
2469 if ((q_params->desc_base == NULL) || (ret != 0))
2470 return (ENOMEM);
2471
2472 /* size in bytes of the udma completion ring descriptor */
2473 q_params->cdesc_size = 16;
2474 rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2475 ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2476 &q_params->cdesc_phy_base_map,
2477 (bus_addr_t *)&q_params->cdesc_phy_base,
2478 (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2479
2480 if ((q_params->cdesc_base == NULL) || (ret != 0))
2481 return (ENOMEM);
2482
2483 /* Allocate taskqueues */
2484 NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2485 rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2486 taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2487 taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2488 device_get_nameunit(adapter->dev));
2489
2490 /* Setup DMA descriptor areas. */
2491 ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2492 1, 0, /* alignment, bounds */
2493 BUS_SPACE_MAXADDR, /* lowaddr */
2494 BUS_SPACE_MAXADDR, /* highaddr */
2495 NULL, NULL, /* filter, filterarg */
2496 AL_TSO_SIZE, /* maxsize */
2497 1, /* nsegments */
2498 AL_TSO_SIZE, /* maxsegsize */
2499 0, /* flags */
2500 NULL, /* lockfunc */
2501 NULL, /* lockfuncarg */
2502 &rx_ring->dma_buf_tag);
2503
2504 if (ret != 0) {
2505 device_printf(dev,"Unable to allocate RX dma_buf_tag\n");
2506 return (ret);
2507 }
2508
2509 for (size = 0; size < rx_ring->sw_count; size++) {
2510 ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2511 &rx_ring->rx_buffer_info[size].dma_map);
2512 if (ret != 0) {
2513 device_printf(dev,"Unable to map DMA RX buffer memory\n");
2514 return (ret);
2515 }
2516 }
2517
2518 /* Zero out the descriptor ring */
2519 memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2520
2521 /* Create LRO for the ring */
2522 if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
2523 int err = tcp_lro_init(&rx_ring->lro);
2524 if (err != 0) {
2525 device_printf(adapter->dev,
2526 "LRO[%d] Initialization failed!\n", qid);
2527 } else {
2528 device_printf_dbg(adapter->dev,
2529 "RX Soft LRO[%d] Initialized\n", qid);
2530 rx_ring->lro_enabled = true;
2531 rx_ring->lro.ifp = adapter->netdev;
2532 }
2533 }
2534
2535 rx_ring->next_to_clean = 0;
2536 rx_ring->next_to_use = 0;
2537
2538 return (0);
2539 }
2540
2541 /*
2542 * al_eth_free_rx_resources - Free Rx Resources
2543 * @adapter: network interface device structure
2544 * @qid: queue index
2545 *
2546 * Free all receive software resources
2547 */
2548 static void
2549 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2550 {
2551 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2552 struct al_udma_q_params *q_params = &rx_ring->q_params;
2553 int size;
2554
2555 /* At this point interrupts' handlers must be deactivated */
2556 while (taskqueue_cancel(rx_ring->enqueue_tq,
2557 &rx_ring->enqueue_task, NULL)) {
2558 taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2559 }
2560
2561 taskqueue_free(rx_ring->enqueue_tq);
2562
2563 for (size = 0; size < rx_ring->sw_count; size++) {
2564 m_freem(rx_ring->rx_buffer_info[size].m);
2565 rx_ring->rx_buffer_info[size].m = NULL;
2566 bus_dmamap_unload(rx_ring->dma_buf_tag,
2567 rx_ring->rx_buffer_info[size].dma_map);
2568 bus_dmamap_destroy(rx_ring->dma_buf_tag,
2569 rx_ring->rx_buffer_info[size].dma_map);
2570 }
2571 bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2572
2573 free(rx_ring->rx_buffer_info, M_IFAL);
2574 rx_ring->rx_buffer_info = NULL;
2575
2576 /* if not set, then don't free */
2577 if (q_params->desc_base == NULL)
2578 return;
2579
2580 al_dma_free_coherent(q_params->desc_phy_base_tag,
2581 q_params->desc_phy_base_map, q_params->desc_base);
2582
2583 q_params->desc_base = NULL;
2584
2585 /* if not set, then don't free */
2586 if (q_params->cdesc_base == NULL)
2587 return;
2588
2589 al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2590 q_params->cdesc_phy_base_map, q_params->cdesc_base);
2591
2592 	q_params->cdesc_base = NULL;
2593
2594 /* Free LRO resources */
2595 tcp_lro_free(&rx_ring->lro);
2596 }
2597
2598 /*
2599 * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2600 * @adapter: board private structure
2601 *
2602 * Free all receive software resources
2603 */
2604 static void
2605 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2606 {
2607 int i;
2608
2609 for (i = 0; i < adapter->num_rx_queues; i++)
2610 if (adapter->rx_ring[i].q_params.desc_base != 0)
2611 al_eth_free_rx_resources(adapter, i);
2612 }
2613
2614 /*
2615 * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2616 * @adapter: board private structure
2617 *
2618 * Return 0 on success, negative on failure
2619 */
2620 static int
2621 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2622 {
2623 int i, rc = 0;
2624
2625 for (i = 0; i < adapter->num_rx_queues; i++) {
2626 rc = al_eth_setup_rx_resources(adapter, i);
2627 if (rc == 0)
2628 continue;
2629
2630 device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2631 goto err_setup_rx;
2632 }
2633 return (0);
2634
2635 err_setup_rx:
2636 /* rewind the index freeing the rings as we go */
2637 while (i--)
2638 al_eth_free_rx_resources(adapter, i);
2639 return (rc);
2640 }
2641
2642 /*
2643 * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2644 * @adapter: private structure
2645 *
2646 * Return 0 on success, negative on failure
2647 */
2648 static int
2649 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2650 {
2651 int i, rc = 0;
2652
2653 for (i = 0; i < adapter->num_tx_queues; i++) {
2654 rc = al_eth_setup_tx_resources(adapter, i);
2655 if (rc == 0)
2656 continue;
2657
2658 device_printf(adapter->dev,
2659 "Allocation for Tx Queue %u failed\n", i);
2660 goto err_setup_tx;
2661 }
2662
2663 return (0);
2664
2665 err_setup_tx:
2666 /* rewind the index freeing the rings as we go */
2667 while (i--)
2668 al_eth_free_tx_resources(adapter, i);
2669
2670 return (rc);
2671 }
2672
2673 static void
2674 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2675 {
2676
2677 /* disable forwarding interrupts from eth through pci end point */
2678 if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2679 (adapter->board_type == ALPINE_NIC)) {
2680 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2681 AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2682 }
2683
2684 /* mask hw interrupts */
2685 al_eth_interrupts_mask(adapter);
2686 }
2687
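/*
 * Unmask the group D (management) summary and one bit per active rx
 * (group B) and tx (group C) queue. In legacy INTx mode the group B/C
 * summary bits must also be unmasked in group A, since there are no
 * per-queue vectors to deliver them.
 */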
2688 static void
2689 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2690 {
2691 	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
2692 	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
2693 	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2694 uint32_t group_d_mask = 3 << 8;
2695 struct unit_regs __iomem *regs_base =
2696 (struct unit_regs __iomem *)adapter->udma_base;
2697
2698 if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2699 group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2700 AL_INT_GROUP_A_GROUP_C_SUM |
2701 AL_INT_GROUP_A_GROUP_D_SUM;
2702
2703 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2704 AL_INT_GROUP_A, group_a_mask);
2705 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2706 AL_INT_GROUP_B, group_b_mask);
2707 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2708 AL_INT_GROUP_C, group_c_mask);
2709 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2710 AL_INT_GROUP_D, group_d_mask);
2711 }
2712
2713 static void
2714 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2715 {
2716 struct unit_regs __iomem *regs_base =
2717 (struct unit_regs __iomem *)adapter->udma_base;
2718
2719 /* mask all interrupts */
2720 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2721 AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2722 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2723 AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2724 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2725 AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2726 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2727 AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2728 }
2729
2730 static int
2731 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2732 {
2733 enum al_iofic_mode int_mode;
2734 uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2735 uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2736 uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2737 uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2738
2739 /* single INTX mode */
2740 if (adapter->msix_vecs == 0)
2741 int_mode = AL_IOFIC_MODE_LEGACY;
2742 else if (adapter->msix_vecs > 1)
2743 int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2744 else {
2745 device_printf(adapter->dev,
2746 "udma doesn't support single MSI-X mode yet.\n");
2747 return (EIO);
2748 }
2749
2750 if (adapter->board_type != ALPINE_INTEGRATED) {
2751 		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2752 		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2753 		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2754 		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2755 }
2756
2757 if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2758 int_mode, m2s_errors_disable, m2s_aborts_disable,
2759 s2m_errors_disable, s2m_aborts_disable)) {
2760 device_printf(adapter->dev,
2761 "al_udma_unit_int_config failed!.\n");
2762 return (EIO);
2763 }
2764 adapter->int_mode = int_mode;
2765 device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2766 int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2767 int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2768 /* set interrupt moderation resolution to 15us */
2769 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
2770 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2771 /* by default interrupt coalescing is disabled */
2772 adapter->tx_usecs = 0;
2773 adapter->rx_usecs = 0;
2774
2775 return (0);
2776 }
2777
2778 /*
2779 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2780 * @index: Index in RX flow hash indirection table
2781 * @n_rx_rings: Number of RX rings to use
2782 *
2783 * This function provides the default policy for RX flow hash indirection.
2784 */
2785 static inline uint32_t
2786 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2787 {
2788
2789 return (index % n_rx_rings);
2790 }
2791
2792 static void*
2793 al_eth_update_stats(struct al_eth_adapter *adapter)
2794 {
2795 struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2796
2797 if (adapter->up == 0)
2798 return (NULL);
2799
2800 al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2801
2802 return (NULL);
2803 }
2804
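/*
 * if_get_counter(9) method: report counters from the MAC statistics
 * gathered by al_eth_update_stats(); anything not tracked by the MAC
 * falls back to the generic counters.
 */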
2805 static uint64_t
2806 al_get_counter(if_t ifp, ift_counter cnt)
2807 {
2808 struct al_eth_adapter *adapter;
2809 struct al_eth_mac_stats *mac_stats;
2810 uint64_t rv;
2811
2812 adapter = if_getsoftc(ifp);
2813 mac_stats = &adapter->mac_stats;
2814
2815 switch (cnt) {
2816 case IFCOUNTER_IPACKETS:
2817 return (mac_stats->aFramesReceivedOK); /* including pause frames */
2818 case IFCOUNTER_OPACKETS:
2819 return (mac_stats->aFramesTransmittedOK);
2820 case IFCOUNTER_IBYTES:
2821 return (mac_stats->aOctetsReceivedOK);
2822 case IFCOUNTER_OBYTES:
2823 return (mac_stats->aOctetsTransmittedOK);
2824 case IFCOUNTER_IMCASTS:
2825 return (mac_stats->ifInMulticastPkts);
2826 case IFCOUNTER_OMCASTS:
2827 return (mac_stats->ifOutMulticastPkts);
2828 case IFCOUNTER_COLLISIONS:
2829 return (0);
2830 case IFCOUNTER_IQDROPS:
2831 return (mac_stats->etherStatsDropEvents);
2832 case IFCOUNTER_IERRORS:
2833 rv = mac_stats->ifInErrors +
2834 mac_stats->etherStatsUndersizePkts + /* good but short */
2835 mac_stats->etherStatsFragments + /* short and bad*/
2836 mac_stats->etherStatsJabbers + /* with crc errors */
2837 mac_stats->etherStatsOversizePkts +
2838 mac_stats->aFrameCheckSequenceErrors +
2839 mac_stats->aAlignmentErrors;
2840 return (rv);
2841 case IFCOUNTER_OERRORS:
2842 return (mac_stats->ifOutErrors);
2843 default:
2844 return (if_get_counter_default(ifp, cnt));
2845 }
2846 }
2847
2848 static u_int
2849 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2850 {
2851 unsigned char *mac;
2852
2853 mac = LLADDR(sdl);
2854 /* default mc address inside mac address */
2855 if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
2856 return (1);
2857 else
2858 return (0);
2859 }
2860
2861 static u_int
2862 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2863 {
2864 struct al_eth_adapter *adapter = arg;
2865
2866 al_eth_mac_table_unicast_add(adapter,
2867 AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2868
2869 return (1);
2870 }
2871
2872 /*
2873 * Unicast, Multicast and Promiscuous mode set
2874 *
2875 * The set_rx_mode entry point is called whenever the unicast or multicast
2876 * address lists or the network interface flags are updated. This routine is
2877 * responsible for configuring the hardware for proper unicast, multicast,
2878 * promiscuous mode, and all-multi behavior.
2879 */
2880 static void
2881 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2882 {
2883 if_t ifp = adapter->netdev;
2884 int mc, uc;
2885 uint8_t i;
2886
2887 /* XXXGL: why generic count won't work? */
2888 mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
2889 uc = if_lladdr_count(ifp);
2890
2891 if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2892 al_eth_mac_table_promiscuous_set(adapter, true);
2893 } else {
2894 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
2895 /* This interface is in all-multicasts mode (used by multicast routers). */
2896 al_eth_mac_table_all_multicast_add(adapter,
2897 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2898 } else {
2899 if (mc == 0) {
2900 al_eth_mac_table_entry_clear(adapter,
2901 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2902 } else {
2903 al_eth_mac_table_all_multicast_add(adapter,
2904 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2905 }
2906 }
2907 if (uc != 0) {
2908 i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2909 if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2910 /*
2911 			 * In this case there are more addresses than
2912 * entries in the mac table - set promiscuous
2913 */
2914 al_eth_mac_table_promiscuous_set(adapter, true);
2915 return;
2916 }
2917
2918 /* clear the last configuration */
2919 while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2920 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2921 al_eth_mac_table_entry_clear(adapter, i);
2922 i++;
2923 }
2924
2925 /* set new addresses */
2926 if_foreach_lladdr(ifp, al_program_addr, adapter);
2927 }
2928 al_eth_mac_table_promiscuous_set(adapter, false);
2929 }
2930 }
2931
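/*
 * Program the rx forwarding path: pbits/priority tables, the default
 * control table entry, MAC table defaults (own unicast + broadcast),
 * random Toeplitz hash keys and the RSS indirection table.
 */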
2932 static void
2933 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2934 {
2935 struct al_eth_fwd_ctrl_table_entry entry;
2936 int i;
2937
2938 /* let priority be equal to pbits */
2939 for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2940 al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2941
2942 /* map priority to queue index, queue id = priority/2 */
2943 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2944 al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2945
2946 entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2947 entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2948 entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2949 entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2950 entry.filter = false;
2951
2952 al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);
2953
2954 	/*
2955 	 * By default set the mac table to forward all unicast packets to our
2956 	 * MAC address and all broadcast packets; everything else is dropped.
2957 	 */
2958 al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2959 1);
2960 al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
2961 al_eth_mac_table_promiscuous_set(adapter, false);
2962
2963 /* set toeplitz hash keys */
2964 for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
2965 *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
2966
2967 for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
2968 al_eth_hash_key_set(&adapter->hal_adapter, i,
2969 htonl(adapter->toeplitz_hash_key[i]));
2970
2971 for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
2972 adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
2973 AL_ETH_NUM_QUEUES);
2974 al_eth_set_thash_table_entry(adapter, i, 0,
2975 adapter->rss_ind_tbl[i]);
2976 }
2977
2978 al_eth_fsm_table_init(adapter);
2979 }
2980
2981 static void
2982 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
2983 {
2984
2985 	/*
2986 	 * Determine the correct mbuf cluster size for the requested frame
2987 	 * size, trying from the smallest up to the maximum supported by
2988 	 * the board (max_rx_buff_alloc_size).
2989 	 */
2990 adapter->rx_mbuf_sz = MCLBYTES;
2991 if (size > 2048) {
2992 if (adapter->max_rx_buff_alloc_size > 2048)
2993 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2994 else
2995 return;
2996 }
2997 if (size > 4096) {
2998 if (adapter->max_rx_buff_alloc_size > 4096)
2999 adapter->rx_mbuf_sz = MJUM9BYTES;
3000 else
3001 return;
3002 }
3003 if (size > 9216) {
3004 if (adapter->max_rx_buff_alloc_size > 9216)
3005 adapter->rx_mbuf_sz = MJUM16BYTES;
3006 else
3007 return;
3008 }
3009 }
3010
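/*
 * Apply a new MTU: pick a large enough rx mbuf cluster size, then
 * program the rx frame length limits and the TSO MSS (MTU less room
 * for the L3/L4 headers).
 */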
3011 static int
3012 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3013 {
3014 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3015 ETHER_VLAN_ENCAP_LEN;
3016
3017 al_eth_req_rx_buff_size(adapter, new_mtu);
3018
3019 device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3020 al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3021 AL_ETH_MIN_FRAME_LEN, max_frame);
3022
3023 al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3024
3025 return (0);
3026 }
3027
3028 static int
3029 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3030 {
3031 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3032
3033 if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3034 (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3035 return (EINVAL);
3036 }
3037
3038 return (0);
3039 }
3040
3041 static int
3042 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3043 int qid)
3044 {
3045 int rc = 0;
3046 char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3047 struct al_udma_q_params *q_params;
3048
3049 if (type == UDMA_TX)
3050 q_params = &adapter->tx_ring[qid].q_params;
3051 else
3052 q_params = &adapter->rx_ring[qid].q_params;
3053
3054 rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3055 if (rc < 0) {
3056 device_printf(adapter->dev, "config %s queue %u failed\n", name,
3057 qid);
3058 return (rc);
3059 }
3060 return (rc);
3061 }
3062
3063 static int
3064 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3065 {
3066 int i;
3067
3068 for (i = 0; i < adapter->num_tx_queues; i++)
3069 al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3070
3071 for (i = 0; i < adapter->num_rx_queues; i++)
3072 al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3073
3074 return (0);
3075 }
3076
3077 static void
3078 al_eth_up_complete(struct al_eth_adapter *adapter)
3079 {
3080
3081 al_eth_configure_int_mode(adapter);
3082 al_eth_config_rx_fwd(adapter);
3083 al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
3084 al_eth_udma_queues_enable_all(adapter);
3085 al_eth_refill_all_rx_bufs(adapter);
3086 al_eth_interrupts_unmask(adapter);
3087
3088 /* enable forwarding interrupts from eth through pci end point */
3089 if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3090 (adapter->board_type == ALPINE_NIC)) {
3091 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3092 AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3093 }
3094
3095 al_eth_flow_ctrl_enable(adapter);
3096
3097 mtx_lock(&adapter->stats_mtx);
3098 callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3099 mtx_unlock(&adapter->stats_mtx);
3100
3101 al_eth_mac_start(&adapter->hal_adapter);
3102 }
3103
3104 static int
3105 al_media_update(if_t ifp)
3106 {
3107 struct al_eth_adapter *adapter = if_getsoftc(ifp);
3108
3109 if ((if_getflags(ifp) & IFF_UP) != 0)
3110 mii_mediachg(adapter->mii);
3111
3112 return (0);
3113 }
3114
3115 static void
3116 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3117 {
3118 struct al_eth_adapter *sc = if_getsoftc(ifp);
3119 struct mii_data *mii;
3120
3121 if (sc->mii == NULL) {
3122 ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3123 ifmr->ifm_status = 0;
3124
3125 return;
3126 }
3127
3128 mii = sc->mii;
3129 mii_pollstat(mii);
3130
3131 ifmr->ifm_active = mii->mii_media_active;
3132 ifmr->ifm_status = mii->mii_media_status;
3133 }
3134
3135 static void
3136 al_tick(void *arg)
3137 {
3138 struct al_eth_adapter *adapter = arg;
3139
3140 mii_tick(adapter->mii);
3141
3142 /* Schedule another timeout one second from now */
3143 callout_schedule(&adapter->wd_callout, hz);
3144 }
3145
3146 static void
3147 al_tick_stats(void *arg)
3148 {
3149 struct al_eth_adapter *adapter = arg;
3150
3151 al_eth_update_stats(adapter);
3152
3153 callout_schedule(&adapter->stats_callout, hz);
3154 }
3155
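/*
 * Bring the interface up: reset the function if requested, derive the
 * hwassist bits from the enabled capabilities, init serdes and MAC,
 * set up the interrupt mode, tx/rx rings and IRQs, and finally start
 * the MAC. Errors unwind in reverse order.
 */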
3156 static int
3157 al_eth_up(struct al_eth_adapter *adapter)
3158 {
3159 if_t ifp = adapter->netdev;
3160 int rc;
3161
3162 if (adapter->up)
3163 return (0);
3164
3165 if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3166 al_eth_function_reset(adapter);
3167 adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3168 }
3169
3170 if_sethwassist(ifp, 0);
3171 if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
3172 if_sethwassistbits(ifp, CSUM_TSO, 0);
3173 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3174 if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
3175 if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
3176 if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
3177
3178 al_eth_serdes_init(adapter);
3179
3180 rc = al_eth_hw_init(adapter);
3181 if (rc != 0)
3182 goto err_hw_init_open;
3183
3184 rc = al_eth_setup_int_mode(adapter);
3185 if (rc != 0) {
3186 device_printf(adapter->dev,
3187 "%s failed at setup interrupt mode!\n", __func__);
3188 goto err_setup_int;
3189 }
3190
3191 /* allocate transmit descriptors */
3192 rc = al_eth_setup_all_tx_resources(adapter);
3193 if (rc != 0)
3194 goto err_setup_tx;
3195
3196 /* allocate receive descriptors */
3197 rc = al_eth_setup_all_rx_resources(adapter);
3198 if (rc != 0)
3199 goto err_setup_rx;
3200
3201 rc = al_eth_request_irq(adapter);
3202 if (rc != 0)
3203 goto err_req_irq;
3204
3205 al_eth_up_complete(adapter);
3206
3207 adapter->up = true;
3208
3209 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3210 if_link_state_change(adapter->netdev, LINK_STATE_UP);
3211
3212 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3213 mii_mediachg(adapter->mii);
3214
3215 /* Schedule watchdog timeout */
3216 mtx_lock(&adapter->wd_mtx);
3217 callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3218 mtx_unlock(&adapter->wd_mtx);
3219
3220 mii_pollstat(adapter->mii);
3221 }
3222
3223 return (rc);
3224
3225 err_req_irq:
3226 al_eth_free_all_rx_resources(adapter);
3227 err_setup_rx:
3228 al_eth_free_all_tx_resources(adapter);
3229 err_setup_tx:
3230 al_eth_free_irq(adapter);
3231 err_setup_int:
3232 al_eth_hw_stop(adapter);
3233 err_hw_init_open:
3234 al_eth_function_reset(adapter);
3235
3236 return (rc);
3237 }
3238
3239 static int
3240 al_shutdown(device_t dev)
3241 {
3242 struct al_eth_adapter *adapter = device_get_softc(dev);
3243
3244 al_eth_down(adapter);
3245
3246 return (0);
3247 }
3248
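/*
 * Tear down in roughly the reverse order of al_eth_up(): stop the
 * callouts, mask and detach interrupts, stop the MAC and UDMA, then
 * free the tx/rx rings.
 */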
3249 static void
3250 al_eth_down(struct al_eth_adapter *adapter)
3251 {
3252
3253 device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3254
3255 adapter->up = false;
3256
3257 mtx_lock(&adapter->wd_mtx);
3258 callout_stop(&adapter->wd_callout);
3259 mtx_unlock(&adapter->wd_mtx);
3260
3261 al_eth_disable_int_sync(adapter);
3262
3263 mtx_lock(&adapter->stats_mtx);
3264 callout_stop(&adapter->stats_callout);
3265 mtx_unlock(&adapter->stats_mtx);
3266
3267 al_eth_free_irq(adapter);
3268 al_eth_hw_stop(adapter);
3269
3270 al_eth_free_all_tx_resources(adapter);
3271 al_eth_free_all_rx_resources(adapter);
3272 }
3273
3274 static int
3275 al_ioctl(if_t ifp, u_long command, caddr_t data)
3276 {
3277 struct al_eth_adapter *adapter = if_getsoftc(ifp);
3278 struct ifreq *ifr = (struct ifreq *)data;
3279 int error = 0;
3280
3281 switch (command) {
3282 case SIOCSIFMTU:
3283 {
3284 error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3285 if (error != 0) {
3286 device_printf(adapter->dev, "ioctl wrong mtu %u\n",
3287 if_getmtu(adapter->netdev));
3288 break;
3289 }
3290
3291 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3292 if_setmtu(adapter->netdev, ifr->ifr_mtu);
3293 al_init(adapter);
3294 break;
3295 }
3296 case SIOCSIFFLAGS:
3297 if ((if_getflags(ifp) & IFF_UP) != 0) {
3298 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3299 if (((if_getflags(ifp) ^ adapter->if_flags) &
3300 (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3301 device_printf_dbg(adapter->dev,
3302 "ioctl promisc/allmulti\n");
3303 al_eth_set_rx_mode(adapter);
3304 }
3305 } else {
3306 error = al_eth_up(adapter);
3307 if (error == 0)
3308 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3309 }
3310 } else {
3311 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3312 al_eth_down(adapter);
3313 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3314 }
3315 }
3316
3317 adapter->if_flags = if_getflags(ifp);
3318 break;
3319
3320 case SIOCADDMULTI:
3321 case SIOCDELMULTI:
3322 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3323 			device_printf_dbg(adapter->dev,
3324 			    "ioctl add/del multi before\n");
3325 #ifdef DEVICE_POLLING
3326 			if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
3327 #endif
3328 				al_eth_set_rx_mode(adapter);
3329 		}
3330 break;
3331 case SIOCSIFMEDIA:
3332 case SIOCGIFMEDIA:
3333 if (adapter->mii != NULL)
3334 error = ifmedia_ioctl(ifp, ifr,
3335 &adapter->mii->mii_media, command);
3336 else
3337 error = ifmedia_ioctl(ifp, ifr,
3338 &adapter->media, command);
3339 break;
3340 case SIOCSIFCAP:
3341 {
3342 int mask, reinit;
3343
3344 reinit = 0;
3345 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3346 #ifdef DEVICE_POLLING
3347 if ((mask & IFCAP_POLLING) != 0) {
3348 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
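				/*
				 * Note: no polling handler is registered by
				 * this driver, so "error" is still zero here
				 * and the check below cannot fail.
				 */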
3349 if (error != 0)
3350 return (error);
3351 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3352 } else {
3353 error = ether_poll_deregister(ifp);
3354 /* Enable interrupt even in error case */
3355 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3356 }
3357 }
3358 #endif
3359 if ((mask & IFCAP_HWCSUM) != 0) {
3360 /* apply to both rx and tx */
3361 if_togglecapenable(ifp, IFCAP_HWCSUM);
3362 reinit = 1;
3363 }
3364 if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3365 if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
3366 reinit = 1;
3367 }
3368 if ((mask & IFCAP_TSO) != 0) {
3369 if_togglecapenable(ifp, IFCAP_TSO);
3370 reinit = 1;
3371 }
3372 if ((mask & IFCAP_LRO) != 0) {
3373 if_togglecapenable(ifp, IFCAP_LRO);
3374 }
3375 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3376 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3377 reinit = 1;
3378 }
3379 if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3380 if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
3381 reinit = 1;
3382 }
3383 if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3384 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3385 reinit = 1;
3386 }
3387 if ((reinit != 0) &&
3388 		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
3390 al_init(adapter);
3391 }
3392 break;
3393 }
3394
3395 default:
3396 error = ether_ioctl(ifp, command, data);
3397 break;
3398 }
3399
3400 return (error);
3401 }
3402
3403 static int
3404 al_is_device_supported(device_t dev)
3405 {
3406 uint16_t pci_vendor_id = pci_get_vendor(dev);
3407 uint16_t pci_device_id = pci_get_device(dev);
3408
3409 return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3410 (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3411 pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3412 pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3413 pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3414 }
3415
3416 /* Time in mSec to keep trying to read / write from MDIO in case of error */
3417 #define MDIO_TIMEOUT_MSEC 100
3418 #define MDIO_PAUSE_MSEC 10
3419
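/*
 * miibus read/write methods: MDIO transactions are retried every
 * MDIO_PAUSE_MSEC until MDIO_TIMEOUT_MSEC elapses before giving up.
 */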
3420 static int
3421 al_miibus_readreg(device_t dev, int phy, int reg)
3422 {
3423 struct al_eth_adapter *adapter = device_get_softc(dev);
3424 uint16_t value = 0;
3425 int rc;
3426 int timeout = MDIO_TIMEOUT_MSEC;
3427
3428 while (timeout > 0) {
3429 rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3430 -1, reg, &value);
3431
3432 if (rc == 0)
3433 return (value);
3434
3435 device_printf_dbg(adapter->dev,
3436 "mdio read failed. try again in 10 msec\n");
3437
3438 timeout -= MDIO_PAUSE_MSEC;
3439 pause("readred pause", MDIO_PAUSE_MSEC);
3440 }
3441
3442 if (rc != 0)
3443 device_printf(adapter->dev, "MDIO read failed on timeout\n");
3444
3445 return (value);
3446 }
3447
3448 static int
3449 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3450 {
3451 struct al_eth_adapter *adapter = device_get_softc(dev);
3452 int rc;
3453 int timeout = MDIO_TIMEOUT_MSEC;
3454
3455 while (timeout > 0) {
3456 rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3457 -1, reg, value);
3458
3459 if (rc == 0)
3460 return (0);
3461
3462 device_printf(adapter->dev,
3463 "mdio write failed. try again in 10 msec\n");
3464
3465 timeout -= MDIO_PAUSE_MSEC;
3466 pause("miibus writereg", MDIO_PAUSE_MSEC);
3467 }
3468
3469 if (rc != 0)
3470 device_printf(adapter->dev, "MDIO write failed on timeout\n");
3471
3472 return (rc);
3473 }
3474
3475 static void
3476 al_miibus_statchg(device_t dev)
3477 {
3478 struct al_eth_adapter *adapter = device_get_softc(dev);
3479
3480 device_printf_dbg(adapter->dev,
3481 "al_miibus_statchg: state has changed!\n");
3482 device_printf_dbg(adapter->dev,
3483 "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3484 adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3485
3486 if (adapter->up == 0)
3487 return;
3488
3489 if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3490 if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3491 device_printf(adapter->dev, "link is UP\n");
3492 if_link_state_change(adapter->netdev, LINK_STATE_UP);
3493 } else {
3494 device_printf(adapter->dev, "link is DOWN\n");
3495 if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3496 }
3497 }
3498 }
3499
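/*
 * Propagate a resolved PHY link change to the MAC: translate the
 * active IFM subtype and duplex into al_eth_mac_link_config()
 * parameters; only the 10/100/1000BASE-T subtypes are handled.
 */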
3500 static void
3501 al_miibus_linkchg(device_t dev)
3502 {
3503 struct al_eth_adapter *adapter = device_get_softc(dev);
3504 uint8_t duplex = 0;
3505 uint8_t speed = 0;
3506
3507 if (adapter->mii == NULL)
3508 return;
3509
3510 if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3511 return;
3512
3513 /* Ignore link changes when link is not ready */
3514 if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3515 (IFM_AVALID | IFM_ACTIVE)) {
3516 return;
3517 }
3518
3519 if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3520 duplex = 1;
3521
3522 speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3523
3524 if (speed == IFM_10_T) {
3525 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3526 AL_10BASE_T_SPEED, duplex);
3527 return;
3528 }
3529
3530 if (speed == IFM_100_TX) {
3531 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3532 AL_100BASE_TX_SPEED, duplex);
3533 return;
3534 }
3535
3536 if (speed == IFM_1000_T) {
3537 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3538 AL_1000BASE_T_SPEED, duplex);
3539 return;
3540 }
3541
3542 device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3543 adapter->mii->mii_media_active);
3544 }
3545