1 /*-
2 * Copyright (c) 2016-2017 Alexander Motin <[email protected]>
3 * Copyright (C) 2013 Intel Corporation
4 * Copyright (C) 2015 EMC Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
31 * two or more systems using a PCI-e links, providing remote memory access.
32 *
33 * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
34 *
35 * NOTE: Much of the code in this module is shared with Linux. Any patches may
36 * be picked up and redistributed in Linux with a dual GPL/BSD license.
37 */
38
39 #include <sys/cdefs.h>
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/interrupt.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/module.h>
49 #include <sys/mutex.h>
50 #include <sys/pciio.h>
51 #include <sys/taskqueue.h>
52 #include <sys/tree.h>
53 #include <sys/queue.h>
54 #include <sys/rman.h>
55 #include <sys/sbuf.h>
56 #include <sys/sysctl.h>
57 #include <vm/vm.h>
58 #include <vm/pmap.h>
59 #include <machine/bus.h>
60 #include <machine/intr_machdep.h>
61 #include <machine/resource.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/iommu/iommu.h>
65
66 #include "ntb_hw_intel.h"
67 #include "../ntb.h"
68
69 #define MAX_MSIX_INTERRUPTS \
70 MAX(MAX(XEON_DB_COUNT, ATOM_DB_COUNT), XEON_GEN3_DB_COUNT)
71
72 #define NTB_HB_TIMEOUT 1 /* second */
73 #define ATOM_LINK_RECOVERY_TIME 500 /* ms */
74 #define BAR_HIGH_MASK (~((1ull << 12) - 1))
75
76 #define NTB_MSIX_VER_GUARD 0xaabbccdd
77 #define NTB_MSIX_RECEIVED 0xe0f0e0f0
78
79 /*
80 * PCI constants could be somewhere more generic, but aren't defined/used in
81 * pci.c.
82 */
83 #define PCI_MSIX_ENTRY_SIZE 16
84 #define PCI_MSIX_ENTRY_LOWER_ADDR 0
85 #define PCI_MSIX_ENTRY_UPPER_ADDR 4
86 #define PCI_MSIX_ENTRY_DATA 8
87
88 enum ntb_device_type {
89 NTB_XEON_GEN1,
90 NTB_XEON_GEN3,
91 NTB_ATOM
92 };
93
94 /* ntb_conn_type are hardware numbers, cannot change. */
95 enum ntb_conn_type {
96 NTB_CONN_TRANSPARENT = 0,
97 NTB_CONN_B2B = 1,
98 NTB_CONN_RP = 2,
99 };
100
101 enum ntb_b2b_direction {
102 NTB_DEV_USD = 0,
103 NTB_DEV_DSD = 1,
104 };
105
106 enum ntb_bar {
107 NTB_CONFIG_BAR = 0,
108 NTB_B2B_BAR_1,
109 NTB_B2B_BAR_2,
110 NTB_B2B_BAR_3,
111 NTB_MAX_BARS
112 };
113
114 enum {
115 NTB_MSIX_GUARD = 0,
116 NTB_MSIX_DATA0,
117 NTB_MSIX_DATA1,
118 NTB_MSIX_DATA2,
119 NTB_MSIX_OFS0,
120 NTB_MSIX_OFS1,
121 NTB_MSIX_OFS2,
122 NTB_MSIX_DONE,
123 NTB_MAX_MSIX_SPAD
124 };
125
126 /* Device features and workarounds */
127 #define HAS_FEATURE(ntb, feature) \
128 (((ntb)->features & (feature)) != 0)
129
130 struct ntb_hw_info {
131 uint32_t device_id;
132 const char *desc;
133 enum ntb_device_type type;
134 uint32_t features;
135 };
136
137 struct ntb_pci_bar_info {
138 bus_space_tag_t pci_bus_tag;
139 bus_space_handle_t pci_bus_handle;
140 int pci_resource_id;
141 struct resource *pci_resource;
142 vm_paddr_t pbase;
143 caddr_t vbase;
144 vm_size_t size;
145 vm_memattr_t map_mode;
146
147 /* Configuration register offsets */
148 uint32_t psz_off;
149 uint32_t ssz_off;
150 uint32_t pbarxlat_off;
151 };
152
153 struct ntb_int_info {
154 struct resource *res;
155 int rid;
156 void *tag;
157 };
158
159 struct ntb_vec {
160 struct ntb_softc *ntb;
161 uint32_t num;
162 unsigned masked;
163 };
164
165 struct ntb_reg {
166 uint32_t ntb_ctl;
167 uint32_t lnk_sta;
168 uint8_t db_size;
169 unsigned mw_bar[NTB_MAX_BARS];
170 };
171
172 struct ntb_alt_reg {
173 uint32_t db_bell;
174 uint32_t db_mask;
175 uint32_t spad;
176 };
177
178 struct ntb_xlat_reg {
179 uint32_t bar0_base;
180 uint32_t bar2_base;
181 uint32_t bar4_base;
182 uint32_t bar5_base;
183
184 uint32_t bar2_xlat;
185 uint32_t bar4_xlat;
186 uint32_t bar5_xlat;
187
188 uint32_t bar2_limit;
189 uint32_t bar4_limit;
190 uint32_t bar5_limit;
191 };
192
193 struct ntb_b2b_addr {
194 uint64_t bar0_addr;
195 uint64_t bar2_addr64;
196 uint64_t bar4_addr64;
197 uint64_t bar4_addr32;
198 uint64_t bar5_addr32;
199 };
200
201 struct ntb_msix_data {
202 uint32_t nmd_ofs;
203 uint32_t nmd_data;
204 };
205
206 struct ntb_softc {
207 /* ntb.c context. Do not move! Must go first! */
208 void *ntb_store;
209
210 device_t device;
211 enum ntb_device_type type;
212 uint32_t features;
213
214 struct ntb_pci_bar_info bar_info[NTB_MAX_BARS];
215 struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS];
216 uint32_t allocated_interrupts;
217
218 struct ntb_msix_data peer_msix_data[XEON_NONLINK_DB_MSIX_BITS];
219 struct ntb_msix_data msix_data[XEON_NONLINK_DB_MSIX_BITS];
220 bool peer_msix_good;
221 bool peer_msix_done;
222 struct ntb_pci_bar_info *peer_lapic_bar;
223 struct callout peer_msix_work;
224
225 bus_dma_tag_t bar0_dma_tag;
226 bus_dmamap_t bar0_dma_map;
227
228 struct callout heartbeat_timer;
229 struct callout lr_timer;
230
231 struct ntb_vec *msix_vec;
232
233 uint32_t ppd;
234 enum ntb_conn_type conn_type;
235 enum ntb_b2b_direction dev_type;
236
237 /* Offset of peer bar0 in B2B BAR */
238 uint64_t b2b_off;
239 /* Memory window used to access peer bar0 */
240 #define B2B_MW_DISABLED UINT8_MAX
241 uint8_t b2b_mw_idx;
242 uint32_t msix_xlat;
243 uint8_t msix_mw_idx;
244
245 uint8_t mw_count;
246 uint8_t spad_count;
247 uint8_t db_count;
248 uint8_t db_vec_count;
249 uint8_t db_vec_shift;
250
251 /* Protects local db_mask. */
252 #define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock)
253 #define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock)
254 #define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f))
255 struct mtx db_mask_lock;
256
257 volatile uint32_t ntb_ctl;
258 volatile uint32_t lnk_sta;
259
260 uint64_t db_valid_mask;
261 uint64_t db_link_mask;
262 uint64_t db_mask;
263 uint64_t fake_db; /* NTB_SB01BASE_LOCKUP*/
264 uint64_t force_db; /* NTB_SB01BASE_LOCKUP*/
265
266 int last_ts; /* ticks @ last irq */
267
268 const struct ntb_reg *reg;
269 const struct ntb_alt_reg *self_reg;
270 const struct ntb_alt_reg *peer_reg;
271 const struct ntb_xlat_reg *xlat_reg;
272 };
273
274 #ifdef __i386__
275 static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag,bus_space_handle_t handle,bus_size_t offset)276 bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
277 bus_size_t offset)
278 {
279
280 return (bus_space_read_4(tag, handle, offset) |
281 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
282 }
283
284 static __inline void
bus_space_write_8(bus_space_tag_t tag,bus_space_handle_t handle,bus_size_t offset,uint64_t val)285 bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
286 bus_size_t offset, uint64_t val)
287 {
288
289 bus_space_write_4(tag, handle, offset, val);
290 bus_space_write_4(tag, handle, offset + 4, val >> 32);
291 }
292 #endif
293
294 #define intel_ntb_bar_read(SIZE, bar, offset) \
295 bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
296 ntb->bar_info[(bar)].pci_bus_handle, (offset))
297 #define intel_ntb_bar_write(SIZE, bar, offset, val) \
298 bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
299 ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
300 #define intel_ntb_reg_read(SIZE, offset) \
301 intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
302 #define intel_ntb_reg_write(SIZE, offset, val) \
303 intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
304 #define intel_ntb_mw_read(SIZE, offset) \
305 intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
306 offset)
307 #define intel_ntb_mw_write(SIZE, offset, val) \
308 intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
309 offset, val)
310
311 static int intel_ntb_probe(device_t device);
312 static int intel_ntb_attach(device_t device);
313 static int intel_ntb_detach(device_t device);
314 static uint64_t intel_ntb_db_valid_mask(device_t dev);
315 static void intel_ntb_spad_clear(device_t dev);
316 static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
317 static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
318 enum ntb_width *width);
319 static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
320 enum ntb_width width);
321 static int intel_ntb_link_disable(device_t dev);
322 static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
323 static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);
324
325 static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
326 static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
327 static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
328 static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
329 uint32_t *base, uint32_t *xlat, uint32_t *lmt);
330 static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
331 static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
332 vm_memattr_t);
333 static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
334 const char *);
335 static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
336 static int map_memory_window_bar(struct ntb_softc *ntb,
337 struct ntb_pci_bar_info *bar);
338 static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
339 static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
340 static int intel_ntb_init_isr(struct ntb_softc *ntb);
341 static int intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb);
342 static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
343 static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
344 static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
345 static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
346 static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
347 static void ndev_vec_isr(void *arg);
348 static void ndev_irq_isr(void *arg);
349 static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
350 static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
351 static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
352 static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
353 static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
354 static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
355 static void intel_ntb_exchange_msix(void *);
356 static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
357 static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
358 static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
359 static int intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb);
360 static int intel_ntb_detect_atom(struct ntb_softc *ntb);
361 static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
362 static int intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb);
363 static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
364 static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
365 static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
366 static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
367 enum ntb_bar regbar);
368 static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
369 uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
370 static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
371 enum ntb_bar idx);
372 static int xeon_setup_b2b_mw(struct ntb_softc *,
373 const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
374 static int xeon_gen3_setup_b2b_mw(struct ntb_softc *);
375 static int intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr,
376 size_t size);
377 static inline bool link_is_up(struct ntb_softc *ntb);
378 static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
379 static inline bool atom_link_is_err(struct ntb_softc *ntb);
380 static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
381 static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
382 static void atom_link_hb(void *arg);
383 static void recover_atom_link(void *arg);
384 static bool intel_ntb_poll_link(struct ntb_softc *ntb);
385 static void save_bar_parameters(struct ntb_pci_bar_info *bar);
386 static void intel_ntb_sysctl_init(struct ntb_softc *);
387 static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
388 static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
389 static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
390 static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
391 static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);
392
393 static unsigned g_ntb_hw_debug_level;
394 SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
395 &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
396 #define intel_ntb_printf(lvl, ...) do { \
397 if ((lvl) <= g_ntb_hw_debug_level) { \
398 device_printf(ntb->device, __VA_ARGS__); \
399 } \
400 } while (0)
401
402 #define _NTB_PAT_UC 0
403 #define _NTB_PAT_WC 1
404 #define _NTB_PAT_WT 4
405 #define _NTB_PAT_WP 5
406 #define _NTB_PAT_WB 6
407 #define _NTB_PAT_UCM 7
408 static unsigned g_ntb_mw_pat = _NTB_PAT_UC;
409 SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
410 &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): "
411 "UC: " __XSTRING(_NTB_PAT_UC) ", "
412 "WC: " __XSTRING(_NTB_PAT_WC) ", "
413 "WT: " __XSTRING(_NTB_PAT_WT) ", "
414 "WP: " __XSTRING(_NTB_PAT_WP) ", "
415 "WB: " __XSTRING(_NTB_PAT_WB) ", "
416 "UC-: " __XSTRING(_NTB_PAT_UCM));
417
418 static inline vm_memattr_t
intel_ntb_pat_flags(void)419 intel_ntb_pat_flags(void)
420 {
421
422 switch (g_ntb_mw_pat) {
423 case _NTB_PAT_WC:
424 return (VM_MEMATTR_WRITE_COMBINING);
425 case _NTB_PAT_WT:
426 return (VM_MEMATTR_WRITE_THROUGH);
427 case _NTB_PAT_WP:
428 return (VM_MEMATTR_WRITE_PROTECTED);
429 case _NTB_PAT_WB:
430 return (VM_MEMATTR_WRITE_BACK);
431 case _NTB_PAT_UCM:
432 return (VM_MEMATTR_WEAK_UNCACHEABLE);
433 case _NTB_PAT_UC:
434 /* FALLTHROUGH */
435 default:
436 return (VM_MEMATTR_UNCACHEABLE);
437 }
438 }
439
440 /*
441 * Well, this obviously doesn't belong here, but it doesn't seem to exist
442 * anywhere better yet.
443 */
444 static inline const char *
intel_ntb_vm_memattr_to_str(vm_memattr_t pat)445 intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
446 {
447
448 switch (pat) {
449 case VM_MEMATTR_WRITE_COMBINING:
450 return ("WRITE_COMBINING");
451 case VM_MEMATTR_WRITE_THROUGH:
452 return ("WRITE_THROUGH");
453 case VM_MEMATTR_WRITE_PROTECTED:
454 return ("WRITE_PROTECTED");
455 case VM_MEMATTR_WRITE_BACK:
456 return ("WRITE_BACK");
457 case VM_MEMATTR_WEAK_UNCACHEABLE:
458 return ("UNCACHED");
459 case VM_MEMATTR_UNCACHEABLE:
460 return ("UNCACHEABLE");
461 default:
462 return ("UNKNOWN");
463 }
464 }
465
466 static int g_ntb_msix_idx = 1;
467 SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
468 0, "Use this memory window to access the peer MSIX message complex on "
469 "certain Xeon-based NTB systems, as a workaround for a hardware errata. "
470 "Like b2b_mw_idx, negative values index from the last available memory "
471 "window. (Applies on Xeon platforms with SB01BASE_LOCKUP errata.)");
472
473 static int g_ntb_mw_idx = -1;
474 SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
475 0, "Use this memory window to access the peer NTB registers. A "
476 "non-negative value starts from the first MW index; a negative value "
477 "starts from the last MW index. The default is -1, i.e., the last "
478 "available memory window. Both sides of the NTB MUST set the same "
479 "value here! (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)");
480
481 /* Hardware owns the low 16 bits of features. */
482 #define NTB_BAR_SIZE_4K (1 << 0)
483 #define NTB_SDOORBELL_LOCKUP (1 << 1)
484 #define NTB_SB01BASE_LOCKUP (1 << 2)
485 #define NTB_B2BDOORBELL_BIT14 (1 << 3)
486 /* Software/configuration owns the top 16 bits. */
487 #define NTB_SPLIT_BAR (1ull << 16)
488 #define NTB_ONE_MSIX (1ull << 17)
489
490 #define NTB_FEATURES_STR \
491 "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
492 "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"
493
494 static struct ntb_hw_info pci_ids[] = {
495 /* XXX: PS/SS IDs left out until they are supported. */
496 { 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
497 NTB_ATOM, 0 },
498
499 { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
500 NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
501 { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
502 NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
503 { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B",
504 NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
505 NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
506 { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B",
507 NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
508 NTB_SB01BASE_LOCKUP },
509 { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B",
510 NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
511 NTB_SB01BASE_LOCKUP },
512
513 { 0x201C8086, "SKL Xeon E5 V5 Non-Transparent Bridge B2B",
514 NTB_XEON_GEN3, 0 },
515 };
516
517 static const struct ntb_reg atom_reg = {
518 .ntb_ctl = ATOM_NTBCNTL_OFFSET,
519 .lnk_sta = ATOM_LINK_STATUS_OFFSET,
520 .db_size = sizeof(uint64_t),
521 .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
522 };
523
524 static const struct ntb_alt_reg atom_pri_reg = {
525 .db_bell = ATOM_PDOORBELL_OFFSET,
526 .db_mask = ATOM_PDBMSK_OFFSET,
527 .spad = ATOM_SPAD_OFFSET,
528 };
529
530 static const struct ntb_alt_reg atom_b2b_reg = {
531 .db_bell = ATOM_B2B_DOORBELL_OFFSET,
532 .spad = ATOM_B2B_SPAD_OFFSET,
533 };
534
535 static const struct ntb_xlat_reg atom_sec_xlat = {
536 #if 0
537 /* "FIXME" says the Linux driver. */
538 .bar0_base = ATOM_SBAR0BASE_OFFSET,
539 .bar2_base = ATOM_SBAR2BASE_OFFSET,
540 .bar4_base = ATOM_SBAR4BASE_OFFSET,
541
542 .bar2_limit = ATOM_SBAR2LMT_OFFSET,
543 .bar4_limit = ATOM_SBAR4LMT_OFFSET,
544 #endif
545
546 .bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
547 .bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
548 };
549
550 static const struct ntb_reg xeon_reg = {
551 .ntb_ctl = XEON_NTBCNTL_OFFSET,
552 .lnk_sta = XEON_LINK_STATUS_OFFSET,
553 .db_size = sizeof(uint16_t),
554 .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
555 };
556
557 static const struct ntb_alt_reg xeon_pri_reg = {
558 .db_bell = XEON_PDOORBELL_OFFSET,
559 .db_mask = XEON_PDBMSK_OFFSET,
560 .spad = XEON_SPAD_OFFSET,
561 };
562
563 static const struct ntb_alt_reg xeon_b2b_reg = {
564 .db_bell = XEON_B2B_DOORBELL_OFFSET,
565 .spad = XEON_B2B_SPAD_OFFSET,
566 };
567
568 static const struct ntb_xlat_reg xeon_sec_xlat = {
569 .bar0_base = XEON_SBAR0BASE_OFFSET,
570 .bar2_base = XEON_SBAR2BASE_OFFSET,
571 .bar4_base = XEON_SBAR4BASE_OFFSET,
572 .bar5_base = XEON_SBAR5BASE_OFFSET,
573
574 .bar2_limit = XEON_SBAR2LMT_OFFSET,
575 .bar4_limit = XEON_SBAR4LMT_OFFSET,
576 .bar5_limit = XEON_SBAR5LMT_OFFSET,
577
578 .bar2_xlat = XEON_SBAR2XLAT_OFFSET,
579 .bar4_xlat = XEON_SBAR4XLAT_OFFSET,
580 .bar5_xlat = XEON_SBAR5XLAT_OFFSET,
581 };
582
583 static struct ntb_b2b_addr xeon_b2b_usd_addr = {
584 .bar0_addr = XEON_B2B_BAR0_ADDR,
585 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
586 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
587 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
588 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
589 };
590
591 static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
592 .bar0_addr = XEON_B2B_BAR0_ADDR,
593 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
594 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
595 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
596 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
597 };
598
599 static const struct ntb_reg xeon_gen3_reg = {
600 .ntb_ctl = XEON_GEN3_REG_IMNTB_CTRL,
601 .lnk_sta = XEON_GEN3_INT_LNK_STS_OFFSET,
602 .db_size = sizeof(uint32_t),
603 .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
604 };
605
606 static const struct ntb_alt_reg xeon_gen3_pri_reg = {
607 .db_bell = XEON_GEN3_REG_EMDOORBELL,
608 .db_mask = XEON_GEN3_REG_IMINT_DISABLE,
609 .spad = XEON_GEN3_REG_IMSPAD,
610 };
611
612 static const struct ntb_alt_reg xeon_gen3_b2b_reg = {
613 .db_bell = XEON_GEN3_REG_IMDOORBELL,
614 .db_mask = XEON_GEN3_REG_EMINT_DISABLE,
615 .spad = XEON_GEN3_REG_IMB2B_SSPAD,
616 };
617
618 static const struct ntb_xlat_reg xeon_gen3_sec_xlat = {
619 .bar0_base = XEON_GEN3_EXT_REG_BAR0BASE,
620 .bar2_base = XEON_GEN3_EXT_REG_BAR1BASE,
621 .bar4_base = XEON_GEN3_EXT_REG_BAR2BASE,
622
623 .bar2_limit = XEON_GEN3_REG_IMBAR1XLIMIT,
624 .bar4_limit = XEON_GEN3_REG_IMBAR2XLIMIT,
625
626 .bar2_xlat = XEON_GEN3_REG_IMBAR1XBASE,
627 .bar4_xlat = XEON_GEN3_REG_IMBAR2XBASE,
628 };
629
630 SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
631 "B2B MW segment overrides -- MUST be the same on both sides");
632
633 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
634 &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
635 "hardware, use this 64-bit address on the bus between the NTB devices for "
636 "the window at BAR2, on the upstream side of the link. MUST be the same "
637 "address on both sides.");
638 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
639 &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
640 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
641 &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
642 "(split-BAR mode).");
643 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
644 &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
645 "(split-BAR mode).");
646
647 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
648 &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
649 "hardware, use this 64-bit address on the bus between the NTB devices for "
650 "the window at BAR2, on the downstream side of the link. MUST be the same"
651 " address on both sides.");
652 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
653 &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
654 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
655 &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
656 "(split-BAR mode).");
657 SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
658 &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
659 "(split-BAR mode).");
660
661 /*
662 * OS <-> Driver interface structures
663 */
664 MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");
665
666 /*
667 * OS <-> Driver linkage functions
668 */
669 static int
intel_ntb_probe(device_t device)670 intel_ntb_probe(device_t device)
671 {
672 struct ntb_hw_info *p;
673
674 p = intel_ntb_get_device_info(pci_get_devid(device));
675 if (p == NULL)
676 return (ENXIO);
677
678 device_set_desc(device, p->desc);
679 return (0);
680 }
681
682 static int
intel_ntb_attach(device_t device)683 intel_ntb_attach(device_t device)
684 {
685 struct ntb_softc *ntb;
686 struct ntb_hw_info *p;
687 int error;
688
689 ntb = device_get_softc(device);
690 p = intel_ntb_get_device_info(pci_get_devid(device));
691
692 ntb->device = device;
693 ntb->type = p->type;
694 ntb->features = p->features;
695 ntb->b2b_mw_idx = B2B_MW_DISABLED;
696 ntb->msix_mw_idx = B2B_MW_DISABLED;
697
698 /* Heartbeat timer for NTB_ATOM since there is no link interrupt */
699 callout_init(&ntb->heartbeat_timer, 1);
700 callout_init(&ntb->lr_timer, 1);
701 callout_init(&ntb->peer_msix_work, 1);
702 mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
703
704 if (ntb->type == NTB_ATOM)
705 error = intel_ntb_detect_atom(ntb);
706 else if (ntb->type == NTB_XEON_GEN3)
707 error = intel_ntb_detect_xeon_gen3(ntb);
708 else
709 error = intel_ntb_detect_xeon(ntb);
710 if (error != 0)
711 goto out;
712
713 intel_ntb_detect_max_mw(ntb);
714
715 pci_enable_busmaster(ntb->device);
716
717 error = intel_ntb_map_pci_bars(ntb);
718 if (error != 0)
719 goto out;
720 if (ntb->type == NTB_ATOM)
721 error = intel_ntb_atom_init_dev(ntb);
722 else if (ntb->type == NTB_XEON_GEN3)
723 error = intel_ntb_xeon_gen3_init_dev(ntb);
724 else
725 error = intel_ntb_xeon_init_dev(ntb);
726 if (error != 0)
727 goto out;
728
729 intel_ntb_spad_clear(device);
730
731 intel_ntb_poll_link(ntb);
732
733 intel_ntb_sysctl_init(ntb);
734
735 /* Attach children to this controller */
736 error = ntb_register_device(device);
737
738 out:
739 if (error != 0)
740 intel_ntb_detach(device);
741 return (error);
742 }
743
744 static int
intel_ntb_detach(device_t device)745 intel_ntb_detach(device_t device)
746 {
747 struct ntb_softc *ntb;
748
749 ntb = device_get_softc(device);
750
751 /* Detach & delete all children */
752 ntb_unregister_device(device);
753
754 if (ntb->self_reg != NULL) {
755 DB_MASK_LOCK(ntb);
756 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask);
757 DB_MASK_UNLOCK(ntb);
758 }
759 callout_drain(&ntb->heartbeat_timer);
760 callout_drain(&ntb->lr_timer);
761 callout_drain(&ntb->peer_msix_work);
762 pci_disable_busmaster(ntb->device);
763 if (ntb->type == NTB_XEON_GEN1)
764 intel_ntb_teardown_xeon(ntb);
765 intel_ntb_teardown_interrupts(ntb);
766
767 mtx_destroy(&ntb->db_mask_lock);
768
769 intel_ntb_unmap_pci_bar(ntb);
770
771 return (0);
772 }
773
774 /*
775 * Driver internal routines
776 */
777 static inline enum ntb_bar
intel_ntb_mw_to_bar(struct ntb_softc * ntb,unsigned mw)778 intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
779 {
780
781 KASSERT(mw < ntb->mw_count,
782 ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
783 KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));
784
785 return (ntb->reg->mw_bar[mw]);
786 }
787
788 static inline bool
bar_is_64bit(struct ntb_softc * ntb,enum ntb_bar bar)789 bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
790 {
791 /* XXX This assertion could be stronger. */
792 KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
793 return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
794 }
795
796 static inline void
bar_get_xlat_params(struct ntb_softc * ntb,enum ntb_bar bar,uint32_t * base,uint32_t * xlat,uint32_t * lmt)797 bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
798 uint32_t *xlat, uint32_t *lmt)
799 {
800 uint32_t basev, lmtv, xlatv;
801
802 switch (bar) {
803 case NTB_B2B_BAR_1:
804 basev = ntb->xlat_reg->bar2_base;
805 lmtv = ntb->xlat_reg->bar2_limit;
806 xlatv = ntb->xlat_reg->bar2_xlat;
807 break;
808 case NTB_B2B_BAR_2:
809 basev = ntb->xlat_reg->bar4_base;
810 lmtv = ntb->xlat_reg->bar4_limit;
811 xlatv = ntb->xlat_reg->bar4_xlat;
812 break;
813 case NTB_B2B_BAR_3:
814 basev = ntb->xlat_reg->bar5_base;
815 lmtv = ntb->xlat_reg->bar5_limit;
816 xlatv = ntb->xlat_reg->bar5_xlat;
817 break;
818 default:
819 KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
820 ("bad bar"));
821 basev = lmtv = xlatv = 0;
822 break;
823 }
824
825 if (base != NULL)
826 *base = basev;
827 if (xlat != NULL)
828 *xlat = xlatv;
829 if (lmt != NULL)
830 *lmt = lmtv;
831 }
832
833 static int
intel_ntb_map_pci_bars(struct ntb_softc * ntb)834 intel_ntb_map_pci_bars(struct ntb_softc *ntb)
835 {
836 struct ntb_pci_bar_info *bar;
837 int rc;
838
839 bar = &ntb->bar_info[NTB_CONFIG_BAR];
840 bar->pci_resource_id = PCIR_BAR(0);
841 rc = map_mmr_bar(ntb, bar);
842 if (rc != 0)
843 goto out;
844
845 /*
846 * At least on Xeon v4 NTB device leaks to host some remote side
847 * BAR0 writes supposed to update scratchpad registers. I am not
848 * sure why it happens, but it may be related to the fact that
849 * on a link side BAR0 is 32KB, while on a host side it is 64KB.
850 * Without this hack DMAR blocks those accesses as not allowed.
851 */
852 if (bus_dma_tag_create(bus_get_dma_tag(ntb->device), 1, 0,
853 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
854 bar->size, 1, bar->size, 0, NULL, NULL, &ntb->bar0_dma_tag)) {
855 device_printf(ntb->device, "Unable to create BAR0 tag\n");
856 return (ENOMEM);
857 }
858 if (bus_dmamap_create(ntb->bar0_dma_tag, 0, &ntb->bar0_dma_map)) {
859 device_printf(ntb->device, "Unable to create BAR0 map\n");
860 return (ENOMEM);
861 }
862 if (bus_dma_iommu_load_ident(ntb->bar0_dma_tag, ntb->bar0_dma_map,
863 bar->pbase, bar->size, 0)) {
864 device_printf(ntb->device, "Unable to load BAR0 map\n");
865 return (ENOMEM);
866 }
867
868 bar = &ntb->bar_info[NTB_B2B_BAR_1];
869 bar->pci_resource_id = PCIR_BAR(2);
870 rc = map_memory_window_bar(ntb, bar);
871 if (rc != 0)
872 goto out;
873 if (ntb->type == NTB_XEON_GEN3) {
874 bar->psz_off = XEON_GEN3_INT_REG_IMBAR1SZ;
875 bar->ssz_off = XEON_GEN3_INT_REG_EMBAR1SZ;
876 bar->pbarxlat_off = XEON_GEN3_REG_EMBAR1XBASE;
877 } else {
878 bar->psz_off = XEON_PBAR23SZ_OFFSET;
879 bar->ssz_off = XEON_SBAR23SZ_OFFSET;
880 bar->pbarxlat_off = XEON_PBAR2XLAT_OFFSET;
881 }
882
883 bar = &ntb->bar_info[NTB_B2B_BAR_2];
884 bar->pci_resource_id = PCIR_BAR(4);
885 rc = map_memory_window_bar(ntb, bar);
886 if (rc != 0)
887 goto out;
888 if (ntb->type == NTB_XEON_GEN3) {
889 bar->psz_off = XEON_GEN3_INT_REG_IMBAR2SZ;
890 bar->ssz_off = XEON_GEN3_INT_REG_EMBAR2SZ;
891 bar->pbarxlat_off = XEON_GEN3_REG_EMBAR2XBASE;
892 } else {
893 bar->psz_off = XEON_PBAR4SZ_OFFSET;
894 bar->ssz_off = XEON_SBAR4SZ_OFFSET;
895 bar->pbarxlat_off = XEON_PBAR4XLAT_OFFSET;
896 }
897
898 if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
899 goto out;
900
901 if (ntb->type == NTB_XEON_GEN3) {
902 device_printf(ntb->device, "no split bar support\n");
903 return (ENXIO);
904 }
905
906 bar = &ntb->bar_info[NTB_B2B_BAR_3];
907 bar->pci_resource_id = PCIR_BAR(5);
908 rc = map_memory_window_bar(ntb, bar);
909 bar->psz_off = XEON_PBAR5SZ_OFFSET;
910 bar->ssz_off = XEON_SBAR5SZ_OFFSET;
911 bar->pbarxlat_off = XEON_PBAR5XLAT_OFFSET;
912
913 out:
914 if (rc != 0)
915 device_printf(ntb->device,
916 "unable to allocate pci resource\n");
917 return (rc);
918 }
919
920 static void
print_map_success(struct ntb_softc * ntb,struct ntb_pci_bar_info * bar,const char * kind)921 print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
922 const char *kind)
923 {
924
925 device_printf(ntb->device,
926 "Mapped BAR%d v:[%p-%p] p:[0x%jx-0x%jx] (0x%jx bytes) (%s)\n",
927 PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
928 (char *)bar->vbase + bar->size - 1,
929 (uintmax_t)bar->pbase, (uintmax_t)(bar->pbase + bar->size - 1),
930 (uintmax_t)bar->size, kind);
931 }
932
933 static int
map_mmr_bar(struct ntb_softc * ntb,struct ntb_pci_bar_info * bar)934 map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
935 {
936
937 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
938 &bar->pci_resource_id, RF_ACTIVE);
939 if (bar->pci_resource == NULL)
940 return (ENXIO);
941
942 save_bar_parameters(bar);
943 bar->map_mode = VM_MEMATTR_UNCACHEABLE;
944 print_map_success(ntb, bar, "mmr");
945 return (0);
946 }
947
948 static int
map_memory_window_bar(struct ntb_softc * ntb,struct ntb_pci_bar_info * bar)949 map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
950 {
951 int rc;
952 vm_memattr_t mapmode;
953 uint8_t bar_size_bits = 0;
954
955 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
956 &bar->pci_resource_id, RF_ACTIVE);
957
958 if (bar->pci_resource == NULL)
959 return (ENXIO);
960
961 save_bar_parameters(bar);
962 /*
963 * Ivytown NTB BAR sizes are misreported by the hardware due to a
964 * hardware issue. To work around this, query the size it should be
965 * configured to by the device and modify the resource to correspond to
966 * this new size. The BIOS on systems with this problem is required to
967 * provide enough address space to allow the driver to make this change
968 * safely.
969 *
970 * Ideally I could have just specified the size when I allocated the
971 * resource like:
972 * bus_alloc_resource(ntb->device,
973 * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
974 * 1ul << bar_size_bits, RF_ACTIVE);
975 * but the PCI driver does not honor the size in this call, so we have
976 * to modify it after the fact.
977 */
978 if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
979 if (bar->pci_resource_id == PCIR_BAR(2))
980 bar_size_bits = pci_read_config(ntb->device,
981 XEON_PBAR23SZ_OFFSET, 1);
982 else
983 bar_size_bits = pci_read_config(ntb->device,
984 XEON_PBAR45SZ_OFFSET, 1);
985
986 rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
987 bar->pci_resource, bar->pbase,
988 bar->pbase + (1ul << bar_size_bits) - 1);
989 if (rc != 0) {
990 device_printf(ntb->device,
991 "unable to resize bar\n");
992 return (rc);
993 }
994
995 save_bar_parameters(bar);
996 }
997
998 bar->map_mode = VM_MEMATTR_UNCACHEABLE;
999 print_map_success(ntb, bar, "mw");
1000
1001 /*
1002 * Optionally, mark MW BARs as anything other than UC to improve
1003 * performance.
1004 */
1005 mapmode = intel_ntb_pat_flags();
1006 if (mapmode == bar->map_mode)
1007 return (0);
1008
1009 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode);
1010 if (rc == 0) {
1011 bar->map_mode = mapmode;
1012 device_printf(ntb->device,
1013 "Marked BAR%d v:[%p-%p] p:[0x%jx-0x%jx] as "
1014 "%s.\n",
1015 PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
1016 (char *)bar->vbase + bar->size - 1,
1017 (uintmax_t)bar->pbase,
1018 (uintmax_t)(bar->pbase + bar->size - 1),
1019 intel_ntb_vm_memattr_to_str(mapmode));
1020 } else
1021 device_printf(ntb->device,
1022 "Unable to mark BAR%d v:[%p-%p] p:[0x%jx-0x%jx] as "
1023 "%s: %d\n",
1024 PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
1025 (char *)bar->vbase + bar->size - 1,
1026 (uintmax_t)bar->pbase,
1027 (uintmax_t)(bar->pbase + bar->size - 1),
1028 intel_ntb_vm_memattr_to_str(mapmode), rc);
1029 /* Proceed anyway */
1030 return (0);
1031 }
1032
1033 static void
intel_ntb_unmap_pci_bar(struct ntb_softc * ntb)1034 intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
1035 {
1036 struct ntb_pci_bar_info *bar;
1037 int i;
1038
1039 if (ntb->bar0_dma_map != NULL) {
1040 bus_dmamap_unload(ntb->bar0_dma_tag, ntb->bar0_dma_map);
1041 bus_dmamap_destroy(ntb->bar0_dma_tag, ntb->bar0_dma_map);
1042 }
1043 if (ntb->bar0_dma_tag != NULL)
1044 bus_dma_tag_destroy(ntb->bar0_dma_tag);
1045 for (i = 0; i < NTB_MAX_BARS; i++) {
1046 bar = &ntb->bar_info[i];
1047 if (bar->pci_resource != NULL)
1048 bus_release_resource(ntb->device, SYS_RES_MEMORY,
1049 bar->pci_resource_id, bar->pci_resource);
1050 }
1051 }
1052
1053 static int
intel_ntb_setup_msix(struct ntb_softc * ntb,uint32_t num_vectors)1054 intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
1055 {
1056 uint32_t i;
1057 int rc;
1058
1059 for (i = 0; i < num_vectors; i++) {
1060 ntb->int_info[i].rid = i + 1;
1061 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
1062 SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
1063 if (ntb->int_info[i].res == NULL) {
1064 device_printf(ntb->device,
1065 "bus_alloc_resource failed\n");
1066 return (ENOMEM);
1067 }
1068 ntb->int_info[i].tag = NULL;
1069 ntb->allocated_interrupts++;
1070 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
1071 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
1072 &ntb->msix_vec[i], &ntb->int_info[i].tag);
1073 if (rc != 0) {
1074 device_printf(ntb->device, "bus_setup_intr failed\n");
1075 return (ENXIO);
1076 }
1077 }
1078 return (0);
1079 }
1080
1081 /*
1082 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
1083 * cannot be allocated for each MSI-X message. JHB seems to think remapping
1084 * should be okay. This tunable should enable us to test that hypothesis
1085 * when someone gets their hands on some Xeon hardware.
1086 */
1087 static int ntb_force_remap_mode;
1088 SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
1089 &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
1090 " to a smaller number of ithreads, even if the desired number are "
1091 "available");
1092
1093 /*
1094 * In case it is NOT ok, give consumers an abort button.
1095 */
1096 static int ntb_prefer_intx;
1097 SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
1098 &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
1099 "than remapping MSI-X messages over available slots (match Linux driver "
1100 "behavior)");
1101
1102 /*
1103 * Remap the desired number of MSI-X messages to available ithreads in a simple
1104 * round-robin fashion.
1105 */
1106 static int
intel_ntb_remap_msix(device_t dev,uint32_t desired,uint32_t avail)1107 intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
1108 {
1109 u_int *vectors;
1110 uint32_t i;
1111 int rc;
1112
1113 if (ntb_prefer_intx != 0)
1114 return (ENXIO);
1115
1116 vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);
1117
1118 for (i = 0; i < desired; i++)
1119 vectors[i] = (i % avail) + 1;
1120
1121 rc = pci_remap_msix(dev, desired, vectors);
1122 free(vectors, M_NTB);
1123 return (rc);
1124 }
1125
1126 static int
intel_ntb_xeon_gen3_init_isr(struct ntb_softc * ntb)1127 intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb)
1128 {
1129 uint64_t i, reg;
1130 uint32_t desired_vectors, num_vectors;
1131 int rc;
1132
1133 ntb->allocated_interrupts = 0;
1134 ntb->last_ts = ticks;
1135
1136 /* Mask all the interrupts, including hardware interrupt */
1137 intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, ~0ULL);
1138
1139 /* Clear Interrupt Status */
1140 reg = intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS);
1141 intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS, reg);
1142
1143 num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
1144 XEON_GEN3_DB_MSIX_VECTOR_COUNT);
1145
1146 rc = pci_alloc_msix(ntb->device, &num_vectors);
1147 if (rc != 0) {
1148 device_printf(ntb->device,
1149 "Interrupt allocation failed %d\n", rc);
1150 return (rc);
1151 }
1152 if (desired_vectors != num_vectors) {
1153 device_printf(ntb->device, "Couldn't get %d vectors\n",
1154 XEON_GEN3_DB_MSIX_VECTOR_COUNT);
1155 return (ENXIO);
1156 }
1157 /* 32 db + 1 hardware */
1158 if (num_vectors == XEON_GEN3_DB_MSIX_VECTOR_COUNT) {
1159 /* Program INTVECXX source register */
1160 for (i = 0; i < XEON_GEN3_DB_MSIX_VECTOR_COUNT; i++) {
1161 /* interrupt source i for vector i */
1162 intel_ntb_reg_write(1, XEON_GEN3_REG_IMINTVEC00 + i, i);
1163 if (i == (XEON_GEN3_DB_MSIX_VECTOR_COUNT - 1)) {
1164 intel_ntb_reg_write(1,
1165 XEON_GEN3_REG_IMINTVEC00 + i,
1166 XEON_GEN3_LINK_VECTOR_INDEX);
1167 }
1168 }
1169
1170 intel_ntb_create_msix_vec(ntb, num_vectors);
1171 rc = intel_ntb_setup_msix(ntb, num_vectors);
1172
1173 /* enable all interrupts */
1174 intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, 0ULL);
1175 } else {
1176 device_printf(ntb->device, "need to remap interrupts, giving up.\n");
1177 return (ENXIO);
1178 }
1179
1180 return (0);
1181 }
1182
1183 static int
intel_ntb_init_isr(struct ntb_softc * ntb)1184 intel_ntb_init_isr(struct ntb_softc *ntb)
1185 {
1186 uint32_t desired_vectors, num_vectors;
1187 int rc;
1188
1189 ntb->allocated_interrupts = 0;
1190 ntb->last_ts = ticks;
1191
1192 /*
1193 * Mask all doorbell interrupts. (Except link events!)
1194 */
1195 DB_MASK_LOCK(ntb);
1196 ntb->db_mask = ntb->db_valid_mask;
1197 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1198 DB_MASK_UNLOCK(ntb);
1199
1200 num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
1201 ntb->db_count);
1202 if (desired_vectors >= 1) {
1203 rc = pci_alloc_msix(ntb->device, &num_vectors);
1204
1205 if (ntb_force_remap_mode != 0 && rc == 0 &&
1206 num_vectors == desired_vectors)
1207 num_vectors--;
1208
1209 if (rc == 0 && num_vectors < desired_vectors) {
1210 rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
1211 num_vectors);
1212 if (rc == 0)
1213 num_vectors = desired_vectors;
1214 else
1215 pci_release_msi(ntb->device);
1216 }
1217 if (rc != 0)
1218 num_vectors = 1;
1219 } else
1220 num_vectors = 1;
1221
1222 if (ntb->type == NTB_XEON_GEN1 && num_vectors < ntb->db_vec_count) {
1223 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1224 device_printf(ntb->device,
1225 "Errata workaround does not support MSI or INTX\n");
1226 return (EINVAL);
1227 }
1228
1229 ntb->db_vec_count = 1;
1230 ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
1231 rc = intel_ntb_setup_legacy_interrupt(ntb);
1232 } else {
1233 if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
1234 HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1235 device_printf(ntb->device,
1236 "Errata workaround expects %d doorbell bits\n",
1237 XEON_NONLINK_DB_MSIX_BITS);
1238 return (EINVAL);
1239 }
1240
1241 intel_ntb_create_msix_vec(ntb, num_vectors);
1242 rc = intel_ntb_setup_msix(ntb, num_vectors);
1243 }
1244 if (rc != 0) {
1245 device_printf(ntb->device,
1246 "Error allocating interrupts: %d\n", rc);
1247 intel_ntb_free_msix_vec(ntb);
1248 }
1249
1250 return (rc);
1251 }
1252
1253 static int
intel_ntb_setup_legacy_interrupt(struct ntb_softc * ntb)1254 intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
1255 {
1256 int rc;
1257
1258 ntb->int_info[0].rid = 0;
1259 ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
1260 &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
1261 if (ntb->int_info[0].res == NULL) {
1262 device_printf(ntb->device, "bus_alloc_resource failed\n");
1263 return (ENOMEM);
1264 }
1265
1266 ntb->int_info[0].tag = NULL;
1267 ntb->allocated_interrupts = 1;
1268
1269 rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
1270 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
1271 ntb, &ntb->int_info[0].tag);
1272 if (rc != 0) {
1273 device_printf(ntb->device, "bus_setup_intr failed\n");
1274 return (ENXIO);
1275 }
1276
1277 return (0);
1278 }
1279
1280 static void
intel_ntb_teardown_interrupts(struct ntb_softc * ntb)1281 intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
1282 {
1283 struct ntb_int_info *current_int;
1284 int i;
1285
1286 for (i = 0; i < ntb->allocated_interrupts; i++) {
1287 current_int = &ntb->int_info[i];
1288 if (current_int->tag != NULL)
1289 bus_teardown_intr(ntb->device, current_int->res,
1290 current_int->tag);
1291
1292 if (current_int->res != NULL)
1293 bus_release_resource(ntb->device, SYS_RES_IRQ,
1294 rman_get_rid(current_int->res), current_int->res);
1295 }
1296
1297 intel_ntb_free_msix_vec(ntb);
1298 pci_release_msi(ntb->device);
1299 }
1300
1301 static inline uint64_t
db_ioread(struct ntb_softc * ntb,uint64_t regoff)1302 db_ioread(struct ntb_softc *ntb, uint64_t regoff)
1303 {
1304
1305 switch (ntb->type) {
1306 case NTB_ATOM:
1307 case NTB_XEON_GEN3:
1308 return (intel_ntb_reg_read(8, regoff));
1309 case NTB_XEON_GEN1:
1310 return (intel_ntb_reg_read(2, regoff));
1311 }
1312 __assert_unreachable();
1313 }
1314
1315 static inline void
db_iowrite(struct ntb_softc * ntb,uint64_t regoff,uint64_t val)1316 db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
1317 {
1318
1319 KASSERT((val & ~ntb->db_valid_mask) == 0,
1320 ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
1321 (uintmax_t)(val & ~ntb->db_valid_mask),
1322 (uintmax_t)ntb->db_valid_mask));
1323
1324 if (regoff == ntb->self_reg->db_mask)
1325 DB_MASK_ASSERT(ntb, MA_OWNED);
1326 db_iowrite_raw(ntb, regoff, val);
1327 }
1328
1329 static inline void
db_iowrite_raw(struct ntb_softc * ntb,uint64_t regoff,uint64_t val)1330 db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
1331 {
1332
1333 switch (ntb->type) {
1334 case NTB_ATOM:
1335 case NTB_XEON_GEN3:
1336 intel_ntb_reg_write(8, regoff, val);
1337 break;
1338 case NTB_XEON_GEN1:
1339 intel_ntb_reg_write(2, regoff, (uint16_t)val);
1340 break;
1341 }
1342 }
1343
1344 static void
intel_ntb_db_set_mask(device_t dev,uint64_t bits)1345 intel_ntb_db_set_mask(device_t dev, uint64_t bits)
1346 {
1347 struct ntb_softc *ntb = device_get_softc(dev);
1348
1349 DB_MASK_LOCK(ntb);
1350 ntb->db_mask |= bits;
1351 if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
1352 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1353 DB_MASK_UNLOCK(ntb);
1354 }
1355
1356 static void
intel_ntb_db_clear_mask(device_t dev,uint64_t bits)1357 intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
1358 {
1359 struct ntb_softc *ntb = device_get_softc(dev);
1360 uint64_t ibits;
1361 int i;
1362
1363 KASSERT((bits & ~ntb->db_valid_mask) == 0,
1364 ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
1365 (uintmax_t)(bits & ~ntb->db_valid_mask),
1366 (uintmax_t)ntb->db_valid_mask));
1367
1368 DB_MASK_LOCK(ntb);
1369 ibits = ntb->fake_db & ntb->db_mask & bits;
1370 ntb->db_mask &= ~bits;
1371 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1372 /* Simulate fake interrupts if unmasked DB bits are set. */
1373 ntb->force_db |= ibits;
1374 for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
1375 if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
1376 swi_sched(ntb->int_info[i].tag, 0);
1377 }
1378 } else {
1379 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1380 }
1381 DB_MASK_UNLOCK(ntb);
1382 }
1383
1384 static uint64_t
intel_ntb_db_read(device_t dev)1385 intel_ntb_db_read(device_t dev)
1386 {
1387 struct ntb_softc *ntb = device_get_softc(dev);
1388
1389 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
1390 return (ntb->fake_db);
1391 if (ntb->type == NTB_XEON_GEN3)
1392 return (intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
1393 else
1394 return (db_ioread(ntb, ntb->self_reg->db_bell));
1395 }
1396
1397 static void
intel_ntb_db_clear(device_t dev,uint64_t bits)1398 intel_ntb_db_clear(device_t dev, uint64_t bits)
1399 {
1400 struct ntb_softc *ntb = device_get_softc(dev);
1401
1402 KASSERT((bits & ~ntb->db_valid_mask) == 0,
1403 ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
1404 (uintmax_t)(bits & ~ntb->db_valid_mask),
1405 (uintmax_t)ntb->db_valid_mask));
1406
1407 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1408 DB_MASK_LOCK(ntb);
1409 ntb->fake_db &= ~bits;
1410 DB_MASK_UNLOCK(ntb);
1411 return;
1412 }
1413
1414 if (ntb->type == NTB_XEON_GEN3)
1415 intel_ntb_reg_write(4, XEON_GEN3_REG_IMINT_STATUS,
1416 (uint32_t)bits);
1417 else
1418 db_iowrite(ntb, ntb->self_reg->db_bell, bits);
1419 }
1420
1421 static inline uint64_t
intel_ntb_vec_mask(struct ntb_softc * ntb,uint64_t db_vector)1422 intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
1423 {
1424 uint64_t shift, mask;
1425
1426 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1427 /*
1428 * Remap vectors in custom way to make at least first
1429 * three doorbells to not generate stray events.
1430 * This breaks Linux compatibility (if one existed)
1431 * when more then one DB is used (not by if_ntb).
1432 */
1433 if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
1434 return (1 << db_vector);
1435 if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
1436 return (0x7ffc);
1437 }
1438
1439 shift = ntb->db_vec_shift;
1440 mask = (1ull << shift) - 1;
1441 return (mask << (shift * db_vector));
1442 }
1443
1444 static void
intel_ntb_interrupt(struct ntb_softc * ntb,uint32_t vec)1445 intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
1446 {
1447 uint64_t vec_mask;
1448
1449 ntb->last_ts = ticks;
1450 vec_mask = intel_ntb_vec_mask(ntb, vec);
1451
1452 if (ntb->type == NTB_XEON_GEN3 && vec == XEON_GEN3_LINK_VECTOR_INDEX)
1453 vec_mask |= ntb->db_link_mask;
1454 if ((vec_mask & ntb->db_link_mask) != 0) {
1455 if (intel_ntb_poll_link(ntb))
1456 ntb_link_event(ntb->device);
1457 if (ntb->type == NTB_XEON_GEN3)
1458 intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS,
1459 intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
1460 }
1461
1462 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
1463 (vec_mask & ntb->db_link_mask) == 0) {
1464 DB_MASK_LOCK(ntb);
1465
1466 /*
1467 * Do not report same DB events again if not cleared yet,
1468 * unless the mask was just cleared for them and this
1469 * interrupt handler call can be the consequence of it.
1470 */
1471 vec_mask &= ~ntb->fake_db | ntb->force_db;
1472 ntb->force_db &= ~vec_mask;
1473
1474 /* Update our internal doorbell register. */
1475 ntb->fake_db |= vec_mask;
1476
1477 /* Do not report masked DB events. */
1478 vec_mask &= ~ntb->db_mask;
1479
1480 DB_MASK_UNLOCK(ntb);
1481 }
1482
1483 if ((vec_mask & ntb->db_valid_mask) != 0)
1484 ntb_db_event(ntb->device, vec);
1485 }
1486
1487 static void
ndev_vec_isr(void * arg)1488 ndev_vec_isr(void *arg)
1489 {
1490 struct ntb_vec *nvec = arg;
1491
1492 intel_ntb_interrupt(nvec->ntb, nvec->num);
1493 }
1494
1495 static void
ndev_irq_isr(void * arg)1496 ndev_irq_isr(void *arg)
1497 {
1498 /* If we couldn't set up MSI-X, we only have the one vector. */
1499 intel_ntb_interrupt(arg, 0);
1500 }
1501
1502 static int
intel_ntb_create_msix_vec(struct ntb_softc * ntb,uint32_t num_vectors)1503 intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
1504 {
1505 uint32_t i;
1506
1507 ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
1508 M_ZERO | M_WAITOK);
1509 for (i = 0; i < num_vectors; i++) {
1510 ntb->msix_vec[i].num = i;
1511 ntb->msix_vec[i].ntb = ntb;
1512 }
1513
1514 return (0);
1515 }
1516
1517 static void
intel_ntb_free_msix_vec(struct ntb_softc * ntb)1518 intel_ntb_free_msix_vec(struct ntb_softc *ntb)
1519 {
1520
1521 if (ntb->msix_vec == NULL)
1522 return;
1523
1524 free(ntb->msix_vec, M_NTB);
1525 ntb->msix_vec = NULL;
1526 }
1527
1528 static void
intel_ntb_get_msix_info(struct ntb_softc * ntb)1529 intel_ntb_get_msix_info(struct ntb_softc *ntb)
1530 {
1531 struct pci_devinfo *dinfo;
1532 struct pcicfg_msix *msix;
1533 uint32_t laddr, data, i, offset;
1534
1535 dinfo = device_get_ivars(ntb->device);
1536 msix = &dinfo->cfg.msix;
1537
1538 CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));
1539
1540 for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
1541 offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE;
1542
1543 laddr = bus_read_4(msix->msix_table_res, offset +
1544 PCI_MSIX_ENTRY_LOWER_ADDR);
1545 intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);
1546
1547 KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
1548 ("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
1549 MSI_INTEL_ADDR_BASE));
1550 ntb->msix_data[i].nmd_ofs = laddr;
1551
1552 data = bus_read_4(msix->msix_table_res, offset +
1553 PCI_MSIX_ENTRY_DATA);
1554 intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);
1555
1556 ntb->msix_data[i].nmd_data = data;
1557 }
1558 }
1559
1560 static struct ntb_hw_info *
intel_ntb_get_device_info(uint32_t device_id)1561 intel_ntb_get_device_info(uint32_t device_id)
1562 {
1563 struct ntb_hw_info *ep;
1564
1565 for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
1566 if (ep->device_id == device_id)
1567 return (ep);
1568 }
1569 return (NULL);
1570 }
1571
1572 static void
intel_ntb_teardown_xeon(struct ntb_softc * ntb)1573 intel_ntb_teardown_xeon(struct ntb_softc *ntb)
1574 {
1575
1576 if (ntb->reg != NULL)
1577 intel_ntb_link_disable(ntb->device);
1578 }
1579
1580 static void
intel_ntb_detect_max_mw(struct ntb_softc * ntb)1581 intel_ntb_detect_max_mw(struct ntb_softc *ntb)
1582 {
1583
1584 switch (ntb->type) {
1585 case NTB_ATOM:
1586 ntb->mw_count = ATOM_MW_COUNT;
1587 break;
1588 case NTB_XEON_GEN1:
1589 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
1590 ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
1591 else
1592 ntb->mw_count = XEON_SNB_MW_COUNT;
1593 break;
1594 case NTB_XEON_GEN3:
1595 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
1596 ntb->mw_count = XEON_GEN3_SPLIT_MW_COUNT;
1597 else
1598 ntb->mw_count = XEON_GEN3_MW_COUNT;
1599 break;
1600 }
1601 }
1602
1603 static int
intel_ntb_detect_xeon(struct ntb_softc * ntb)1604 intel_ntb_detect_xeon(struct ntb_softc *ntb)
1605 {
1606 uint8_t ppd, conn_type;
1607
1608 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
1609 ntb->ppd = ppd;
1610
1611 if ((ppd & XEON_PPD_DEV_TYPE) != 0)
1612 ntb->dev_type = NTB_DEV_DSD;
1613 else
1614 ntb->dev_type = NTB_DEV_USD;
1615
1616 if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
1617 ntb->features |= NTB_SPLIT_BAR;
1618
1619 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
1620 !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
1621 device_printf(ntb->device,
1622 "Can not apply SB01BASE_LOCKUP workaround "
1623 "with split BARs disabled!\n");
1624 device_printf(ntb->device,
1625 "Expect system hangs under heavy NTB traffic!\n");
1626 ntb->features &= ~NTB_SB01BASE_LOCKUP;
1627 }
1628
1629 /*
1630 * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP
1631 * errata workaround; only do one at a time.
1632 */
1633 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
1634 ntb->features &= ~NTB_SDOORBELL_LOCKUP;
1635
1636 conn_type = ppd & XEON_PPD_CONN_TYPE;
1637 switch (conn_type) {
1638 case NTB_CONN_B2B:
1639 ntb->conn_type = conn_type;
1640 break;
1641 case NTB_CONN_RP:
1642 case NTB_CONN_TRANSPARENT:
1643 default:
1644 device_printf(ntb->device, "Unsupported connection type: %u\n",
1645 (unsigned)conn_type);
1646 return (ENXIO);
1647 }
1648 return (0);
1649 }
1650
1651 static int
intel_ntb_detect_atom(struct ntb_softc * ntb)1652 intel_ntb_detect_atom(struct ntb_softc *ntb)
1653 {
1654 uint32_t ppd, conn_type;
1655
1656 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
1657 ntb->ppd = ppd;
1658
1659 if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
1660 ntb->dev_type = NTB_DEV_DSD;
1661 else
1662 ntb->dev_type = NTB_DEV_USD;
1663
1664 conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
1665 switch (conn_type) {
1666 case NTB_CONN_B2B:
1667 ntb->conn_type = conn_type;
1668 break;
1669 default:
1670 device_printf(ntb->device, "Unsupported NTB configuration\n");
1671 return (ENXIO);
1672 }
1673 return (0);
1674 }
1675
1676 static int
intel_ntb_detect_xeon_gen3(struct ntb_softc * ntb)1677 intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb)
1678 {
1679 uint8_t ppd, conn_type;
1680
1681 ppd = pci_read_config(ntb->device, XEON_GEN3_INT_REG_PPD, 1);
1682 ntb->ppd = ppd;
1683
1684 /* check port definition */
1685 conn_type = XEON_GEN3_REG_PPD_PORT_DEF_F(ppd);
1686 switch (conn_type) {
1687 case NTB_CONN_B2B:
1688 ntb->conn_type = conn_type;
1689 break;
1690 default:
1691 device_printf(ntb->device, "Unsupported connection type: %u\n",
1692 conn_type);
1693 return (ENXIO);
1694 }
1695
1696 /* check cross link configuration status */
1697 if (XEON_GEN3_REG_PPD_CONF_STS_F(ppd)) {
1698 /* NTB Port is configured as DSD/USP */
1699 ntb->dev_type = NTB_DEV_DSD;
1700 } else {
1701 /* NTB Port is configured as USD/DSP */
1702 ntb->dev_type = NTB_DEV_USD;
1703 }
1704
1705 if (XEON_GEN3_REG_PPD_ONE_MSIX_F(ppd)) {
1706 /*
1707 * This bit when set, causes only a single MSI-X message to be
1708 * generated if MSI-X is enabled.
1709 */
1710 ntb->features |= NTB_ONE_MSIX;
1711 }
1712
1713 if (XEON_GEN3_REG_PPD_BAR45_SPL_F(ppd)) {
1714 /* BARs 4 and 5 are presented as two 32b non-prefetchable BARs */
1715 ntb->features |= NTB_SPLIT_BAR;
1716 }
1717
1718 device_printf(ntb->device, "conn type 0x%02x, dev type 0x%02x,"
1719 "features 0x%02x\n", ntb->conn_type, ntb->dev_type, ntb->features);
1720
1721 return (0);
1722 }
1723
1724 static int
intel_ntb_xeon_init_dev(struct ntb_softc * ntb)1725 intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
1726 {
1727 int rc;
1728
1729 ntb->spad_count = XEON_SPAD_COUNT;
1730 ntb->db_count = XEON_DB_COUNT;
1731 ntb->db_link_mask = XEON_DB_LINK_BIT;
1732 ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT;
1733 ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT;
1734
1735 if (ntb->conn_type != NTB_CONN_B2B) {
1736 device_printf(ntb->device, "Connection type %d not supported\n",
1737 ntb->conn_type);
1738 return (ENXIO);
1739 }
1740
1741 ntb->reg = &xeon_reg;
1742 ntb->self_reg = &xeon_pri_reg;
1743 ntb->peer_reg = &xeon_b2b_reg;
1744 ntb->xlat_reg = &xeon_sec_xlat;
1745
1746 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1747 ntb->force_db = ntb->fake_db = 0;
1748 ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
1749 ntb->mw_count;
1750 intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
1751 g_ntb_msix_idx, ntb->msix_mw_idx);
1752 rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
1753 VM_MEMATTR_UNCACHEABLE);
1754 KASSERT(rc == 0, ("shouldn't fail"));
1755 } else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
1756 /*
1757 * There is a Xeon hardware errata related to writes to SDOORBELL or
1758 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
1759 * which may hang the system. To workaround this, use a memory
1760 * window to access the interrupt and scratch pad registers on the
1761 * remote system.
1762 */
1763 ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
1764 ntb->mw_count;
1765 intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
1766 g_ntb_mw_idx, ntb->b2b_mw_idx);
1767 rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
1768 VM_MEMATTR_UNCACHEABLE);
1769 KASSERT(rc == 0, ("shouldn't fail"));
1770 } else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
1771 /*
1772 * HW Errata on bit 14 of b2bdoorbell register. Writes will not be
1773 * mirrored to the remote system. Shrink the number of bits by one,
1774 * since bit 14 is the last bit.
1775 *
1776 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register
1777 * anyway. Nor for non-B2B connection types.
1778 */
1779 ntb->db_count = XEON_DB_COUNT - 1;
1780
1781 ntb->db_valid_mask = (1ull << ntb->db_count) - 1;
1782
1783 if (ntb->dev_type == NTB_DEV_USD)
1784 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
1785 &xeon_b2b_usd_addr);
1786 else
1787 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
1788 &xeon_b2b_dsd_addr);
1789 if (rc != 0)
1790 return (rc);
1791
1792 /* Enable Bus Master and Memory Space on the secondary side */
1793 intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
1794 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1795
1796 /*
1797 * Mask all doorbell interrupts.
1798 */
1799 DB_MASK_LOCK(ntb);
1800 ntb->db_mask = ntb->db_valid_mask;
1801 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1802 DB_MASK_UNLOCK(ntb);
1803
1804 rc = intel_ntb_init_isr(ntb);
1805 return (rc);
1806 }
1807
1808 static int
intel_ntb_xeon_gen3_init_dev(struct ntb_softc * ntb)1809 intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb)
1810 {
1811 int rc;
1812
1813 ntb->spad_count = XEON_GEN3_SPAD_COUNT;
1814 ntb->db_count = XEON_GEN3_DB_COUNT;
1815 ntb->db_link_mask = XEON_GEN3_DB_LINK_BIT;
1816 ntb->db_vec_count = XEON_GEN3_DB_MSIX_VECTOR_COUNT;
1817 ntb->db_vec_shift = XEON_GEN3_DB_MSIX_VECTOR_SHIFT;
1818
1819 if (ntb->conn_type != NTB_CONN_B2B) {
1820 device_printf(ntb->device, "Connection type %d not supported\n",
1821 ntb->conn_type);
1822 return (ENXIO);
1823 }
1824
1825 ntb->reg = &xeon_gen3_reg;
1826 ntb->self_reg = &xeon_gen3_pri_reg;
1827 ntb->peer_reg = &xeon_gen3_b2b_reg;
1828 ntb->xlat_reg = &xeon_gen3_sec_xlat;
1829
1830 ntb->db_valid_mask = (1ULL << ntb->db_count) - 1;
1831
1832 xeon_gen3_setup_b2b_mw(ntb);
1833
1834 /* Enable Bus Master and Memory Space on the External Side */
1835 intel_ntb_reg_write(2, XEON_GEN3_EXT_REG_PCI_CMD,
1836 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1837
1838 /* Setup Interrupt */
1839 rc = intel_ntb_xeon_gen3_init_isr(ntb);
1840
1841 return (rc);
1842 }
1843
1844 static int
intel_ntb_atom_init_dev(struct ntb_softc * ntb)1845 intel_ntb_atom_init_dev(struct ntb_softc *ntb)
1846 {
1847 int error;
1848
1849 KASSERT(ntb->conn_type == NTB_CONN_B2B,
1850 ("Unsupported NTB configuration (%d)\n", ntb->conn_type));
1851
1852 ntb->spad_count = ATOM_SPAD_COUNT;
1853 ntb->db_count = ATOM_DB_COUNT;
1854 ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT;
1855 ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT;
1856 ntb->db_valid_mask = (1ull << ntb->db_count) - 1;
1857
1858 ntb->reg = &atom_reg;
1859 ntb->self_reg = &atom_pri_reg;
1860 ntb->peer_reg = &atom_b2b_reg;
1861 ntb->xlat_reg = &atom_sec_xlat;
1862
1863 /*
1864 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
1865 * resolved. Mask transaction layer internal parity errors.
1866 */
1867 pci_write_config(ntb->device, 0xFC, 0x4, 4);
1868
1869 configure_atom_secondary_side_bars(ntb);
1870
1871 /* Enable Bus Master and Memory Space on the secondary side */
1872 intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
1873 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1874
1875 error = intel_ntb_init_isr(ntb);
1876 if (error != 0)
1877 return (error);
1878
1879 /* Initiate PCI-E link training */
1880 intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1881
1882 callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);
1883
1884 return (0);
1885 }
1886
1887 /* XXX: Linux driver doesn't seem to do any of this for Atom. */
1888 static void
configure_atom_secondary_side_bars(struct ntb_softc * ntb)1889 configure_atom_secondary_side_bars(struct ntb_softc *ntb)
1890 {
1891
1892 if (ntb->dev_type == NTB_DEV_USD) {
1893 intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
1894 XEON_B2B_BAR2_ADDR64);
1895 intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
1896 XEON_B2B_BAR4_ADDR64);
1897 intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
1898 intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
1899 } else {
1900 intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
1901 XEON_B2B_BAR2_ADDR64);
1902 intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
1903 XEON_B2B_BAR4_ADDR64);
1904 intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
1905 intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
1906 }
1907 }
1908
1909 /*
1910 * When working around Xeon SDOORBELL errata by remapping remote registers in a
1911 * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW
1912 * remains for use by a higher layer.
1913 *
1914 * Will only be used if working around SDOORBELL errata and the BIOS-configured
1915 * MW size is sufficiently large.
1916 */
1917 static unsigned int ntb_b2b_mw_share;
1918 SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
1919 0, "If enabled (non-zero), prefer to share half of the B2B peer register "
1920 "MW with higher level consumers. Both sides of the NTB MUST set the same "
1921 "value here.");
1922
1923 static void
xeon_reset_sbar_size(struct ntb_softc * ntb,enum ntb_bar idx,enum ntb_bar regbar)1924 xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
1925 enum ntb_bar regbar)
1926 {
1927 struct ntb_pci_bar_info *bar;
1928 uint8_t bar_sz;
1929
1930 if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
1931 return;
1932
1933 bar = &ntb->bar_info[idx];
1934 bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
1935 if (idx == regbar) {
1936 if (ntb->b2b_off != 0)
1937 bar_sz--;
1938 else
1939 bar_sz = 0;
1940 }
1941 pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
1942 bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
1943 (void)bar_sz;
1944 }
1945
1946 static void
xeon_set_sbar_base_and_limit(struct ntb_softc * ntb,uint64_t bar_addr,enum ntb_bar idx,enum ntb_bar regbar)1947 xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
1948 enum ntb_bar idx, enum ntb_bar regbar)
1949 {
1950 uint64_t reg_val;
1951 uint32_t base_reg, lmt_reg;
1952
1953 bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
1954 if (idx == regbar) {
1955 if (ntb->b2b_off)
1956 bar_addr += ntb->b2b_off;
1957 else
1958 bar_addr = 0;
1959 }
1960
1961 if (!bar_is_64bit(ntb, idx)) {
1962 intel_ntb_reg_write(4, base_reg, bar_addr);
1963 reg_val = intel_ntb_reg_read(4, base_reg);
1964 (void)reg_val;
1965
1966 intel_ntb_reg_write(4, lmt_reg, bar_addr);
1967 reg_val = intel_ntb_reg_read(4, lmt_reg);
1968 (void)reg_val;
1969 } else {
1970 intel_ntb_reg_write(8, base_reg, bar_addr);
1971 reg_val = intel_ntb_reg_read(8, base_reg);
1972 (void)reg_val;
1973
1974 intel_ntb_reg_write(8, lmt_reg, bar_addr);
1975 reg_val = intel_ntb_reg_read(8, lmt_reg);
1976 (void)reg_val;
1977 }
1978 }
1979
1980 static void
xeon_set_pbar_xlat(struct ntb_softc * ntb,uint64_t base_addr,enum ntb_bar idx)1981 xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
1982 {
1983 struct ntb_pci_bar_info *bar;
1984
1985 bar = &ntb->bar_info[idx];
1986 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
1987 intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
1988 base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
1989 } else {
1990 intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
1991 base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
1992 }
1993 (void)base_addr;
1994 }
1995
1996 static int
xeon_setup_b2b_mw(struct ntb_softc * ntb,const struct ntb_b2b_addr * addr,const struct ntb_b2b_addr * peer_addr)1997 xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
1998 const struct ntb_b2b_addr *peer_addr)
1999 {
2000 struct ntb_pci_bar_info *b2b_bar;
2001 vm_size_t bar_size;
2002 uint64_t bar_addr;
2003 enum ntb_bar b2b_bar_num, i;
2004
2005 if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
2006 b2b_bar = NULL;
2007 b2b_bar_num = NTB_CONFIG_BAR;
2008 ntb->b2b_off = 0;
2009 } else {
2010 b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
2011 KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
2012 ("invalid b2b mw bar"));
2013
2014 b2b_bar = &ntb->bar_info[b2b_bar_num];
2015 bar_size = b2b_bar->size;
2016
2017 if (ntb_b2b_mw_share != 0 &&
2018 (bar_size >> 1) >= XEON_B2B_MIN_SIZE)
2019 ntb->b2b_off = bar_size >> 1;
2020 else if (bar_size >= XEON_B2B_MIN_SIZE) {
2021 ntb->b2b_off = 0;
2022 } else {
2023 device_printf(ntb->device,
2024 "B2B bar size is too small!\n");
2025 return (EIO);
2026 }
2027 }
2028
2029 /*
2030 * Reset the secondary bar sizes to match the primary bar sizes.
2031 * (Except, disable or halve the size of the B2B secondary bar.)
2032 */
2033 for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
2034 xeon_reset_sbar_size(ntb, i, b2b_bar_num);
2035
2036 bar_addr = 0;
2037 if (b2b_bar_num == NTB_CONFIG_BAR)
2038 bar_addr = addr->bar0_addr;
2039 else if (b2b_bar_num == NTB_B2B_BAR_1)
2040 bar_addr = addr->bar2_addr64;
2041 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2042 bar_addr = addr->bar4_addr64;
2043 else if (b2b_bar_num == NTB_B2B_BAR_2)
2044 bar_addr = addr->bar4_addr32;
2045 else if (b2b_bar_num == NTB_B2B_BAR_3)
2046 bar_addr = addr->bar5_addr32;
2047 else
2048 KASSERT(false, ("invalid bar"));
2049
2050 intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
2051
2052 /*
2053 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
2054 * register BAR. The B2B BAR is either disabled above or configured
2055 * half-size. It starts at PBAR xlat + offset.
2056 *
2057 * Also set up incoming BAR limits == base (zero length window).
2058 */
2059 xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
2060 b2b_bar_num);
2061 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2062 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
2063 NTB_B2B_BAR_2, b2b_bar_num);
2064 xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
2065 NTB_B2B_BAR_3, b2b_bar_num);
2066 } else
2067 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
2068 NTB_B2B_BAR_2, b2b_bar_num);
2069
2070 /* Zero incoming translation addrs */
2071 intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
2072 intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
2073
2074 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2075 uint32_t xlat_reg, lmt_reg;
2076 enum ntb_bar bar_num;
2077
2078 /*
2079 * We point the chosen MSIX MW BAR xlat to remote LAPIC for
2080 * workaround
2081 */
2082 bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
2083 bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
2084 if (bar_is_64bit(ntb, bar_num)) {
2085 intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
2086 ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
2087 intel_ntb_reg_write(8, lmt_reg, 0);
2088 } else {
2089 intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
2090 ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
2091 intel_ntb_reg_write(4, lmt_reg, 0);
2092 }
2093
2094 ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
2095 }
2096 (void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
2097 (void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
2098
2099 /* Zero outgoing translation limits (whole bar size windows) */
2100 intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
2101 intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
2102
2103 /* Set outgoing translation offsets */
2104 xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
2105 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2106 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
2107 xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
2108 } else
2109 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);
2110
2111 /* Set the translation offset for B2B registers */
2112 bar_addr = 0;
2113 if (b2b_bar_num == NTB_CONFIG_BAR)
2114 bar_addr = peer_addr->bar0_addr;
2115 else if (b2b_bar_num == NTB_B2B_BAR_1)
2116 bar_addr = peer_addr->bar2_addr64;
2117 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2118 bar_addr = peer_addr->bar4_addr64;
2119 else if (b2b_bar_num == NTB_B2B_BAR_2)
2120 bar_addr = peer_addr->bar4_addr32;
2121 else if (b2b_bar_num == NTB_B2B_BAR_3)
2122 bar_addr = peer_addr->bar5_addr32;
2123 else
2124 KASSERT(false, ("invalid bar"));
2125
2126 /*
2127 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
2128 * at a time.
2129 */
2130 intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
2131 intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
2132 return (0);
2133 }
2134
2135 static int
xeon_gen3_setup_b2b_mw(struct ntb_softc * ntb)2136 xeon_gen3_setup_b2b_mw(struct ntb_softc *ntb)
2137 {
2138 uint64_t reg;
2139 uint32_t embarsz, imbarsz;
2140
2141 /* IMBAR1SZ should be equal to EMBAR1SZ */
2142 embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR1SZ, 1);
2143 imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR1SZ, 1);
2144 if (embarsz != imbarsz) {
2145 device_printf(ntb->device,
2146 "IMBAR1SZ (%u) should be equal to EMBAR1SZ (%u)\n",
2147 imbarsz, embarsz);
2148 return (EIO);
2149 }
2150
2151 /* IMBAR2SZ should be equal to EMBAR2SZ */
2152 embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR2SZ, 1);
2153 imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR2SZ, 1);
2154 if (embarsz != imbarsz) {
2155 device_printf(ntb->device,
2156 "IMBAR2SZ (%u) should be equal to EMBAR2SZ (%u)\n",
2157 imbarsz, embarsz);
2158 return (EIO);
2159 }
2160
2161 /* Client will provide the incoming IMBAR1/2XBASE, zero it for now */
2162 intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XBASE, 0);
2163 intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XBASE, 0);
2164
2165 /*
2166 * If the value in IMBAR1XLIMIT is set equal to the value in IMBAR1XBASE,
2167 * the local memory window exposure from EMBAR1 is disabled.
2168 * Note: It is needed to avoid malicious access.
2169 */
2170 intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, 0);
2171 intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, 0);
2172
2173 /* Config outgoing translation limits (whole bar size windows) */
2174 reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR1XBASE);
2175 reg += ntb->bar_info[NTB_B2B_BAR_1].size;
2176 intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR1XLIMIT, reg);
2177
2178 reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR2XBASE);
2179 reg += ntb->bar_info[NTB_B2B_BAR_2].size;
2180 intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR2XLIMIT, reg);
2181
2182 return (0);
2183 }
2184
2185 static inline bool
_xeon_link_is_up(struct ntb_softc * ntb)2186 _xeon_link_is_up(struct ntb_softc *ntb)
2187 {
2188
2189 if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2190 return (true);
2191 return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
2192 }
2193
2194 static inline bool
link_is_up(struct ntb_softc * ntb)2195 link_is_up(struct ntb_softc *ntb)
2196 {
2197
2198 if (ntb->type == NTB_XEON_GEN1 || ntb->type == NTB_XEON_GEN3)
2199 return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
2200 !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));
2201
2202 KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2203 return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
2204 }
2205
2206 static inline bool
atom_link_is_err(struct ntb_softc * ntb)2207 atom_link_is_err(struct ntb_softc *ntb)
2208 {
2209 uint32_t status;
2210
2211 KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2212
2213 status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2214 if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
2215 return (true);
2216
2217 status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2218 return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
2219 }
2220
2221 /* Atom does not have link status interrupt, poll on that platform */
2222 static void
atom_link_hb(void * arg)2223 atom_link_hb(void *arg)
2224 {
2225 struct ntb_softc *ntb = arg;
2226 sbintime_t timo, poll_ts;
2227
2228 timo = NTB_HB_TIMEOUT * hz;
2229 poll_ts = ntb->last_ts + timo;
2230
2231 /*
2232 * Delay polling the link status if an interrupt was received, unless
2233 * the cached link status says the link is down.
2234 */
2235 if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
2236 timo = poll_ts - ticks;
2237 goto out;
2238 }
2239
2240 if (intel_ntb_poll_link(ntb))
2241 ntb_link_event(ntb->device);
2242
2243 if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
2244 /* Link is down with error, proceed with recovery */
2245 callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
2246 return;
2247 }
2248
2249 out:
2250 callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
2251 }
2252
2253 static void
atom_perform_link_restart(struct ntb_softc * ntb)2254 atom_perform_link_restart(struct ntb_softc *ntb)
2255 {
2256 uint32_t status;
2257
2258 /* Driver resets the NTB ModPhy lanes - magic! */
2259 intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
2260 intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
2261 intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
2262 intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
2263
2264 /* Driver waits 100ms to allow the NTB ModPhy to settle */
2265 pause("ModPhy", hz / 10);
2266
2267 /* Clear AER Errors, write to clear */
2268 status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
2269 status &= PCIM_AER_COR_REPLAY_ROLLOVER;
2270 intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
2271
2272 /* Clear unexpected electrical idle event in LTSSM, write to clear */
2273 status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
2274 status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
2275 intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
2276
2277 /* Clear DeSkew Buffer error, write to clear */
2278 status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
2279 status |= ATOM_DESKEWSTS_DBERR;
2280 intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
2281
2282 status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2283 status &= ATOM_IBIST_ERR_OFLOW;
2284 intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
2285
2286 /* Releases the NTB state machine to allow the link to retrain */
2287 status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2288 status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
2289 intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
2290 }
2291
2292 static int
intel_ntb_port_number(device_t dev)2293 intel_ntb_port_number(device_t dev)
2294 {
2295 struct ntb_softc *ntb = device_get_softc(dev);
2296
2297 return (ntb->dev_type == NTB_DEV_USD ? 0 : 1);
2298 }
2299
2300 static int
intel_ntb_peer_port_count(device_t dev)2301 intel_ntb_peer_port_count(device_t dev)
2302 {
2303
2304 return (1);
2305 }
2306
2307 static int
intel_ntb_peer_port_number(device_t dev,int pidx)2308 intel_ntb_peer_port_number(device_t dev, int pidx)
2309 {
2310 struct ntb_softc *ntb = device_get_softc(dev);
2311
2312 if (pidx != 0)
2313 return (-EINVAL);
2314
2315 return (ntb->dev_type == NTB_DEV_USD ? 1 : 0);
2316 }
2317
2318 static int
intel_ntb_peer_port_idx(device_t dev,int port)2319 intel_ntb_peer_port_idx(device_t dev, int port)
2320 {
2321 int peer_port;
2322
2323 peer_port = intel_ntb_peer_port_number(dev, 0);
2324 if (peer_port == -EINVAL || port != peer_port)
2325 return (-EINVAL);
2326
2327 return (0);
2328 }
2329
2330 static int
intel_ntb_link_enable(device_t dev,enum ntb_speed speed __unused,enum ntb_width width __unused)2331 intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
2332 enum ntb_width width __unused)
2333 {
2334 struct ntb_softc *ntb = device_get_softc(dev);
2335 uint32_t cntl;
2336
2337 intel_ntb_printf(2, "%s\n", __func__);
2338
2339 if (ntb->type == NTB_ATOM) {
2340 pci_write_config(ntb->device, NTB_PPD_OFFSET,
2341 ntb->ppd | ATOM_PPD_INIT_LINK, 4);
2342 return (0);
2343 }
2344
2345 if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2346 ntb_link_event(dev);
2347 return (0);
2348 }
2349
2350 cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2351 cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
2352 cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
2353 cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
2354 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2355 cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
2356 intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2357 return (0);
2358 }
2359
2360 static int
intel_ntb_link_disable(device_t dev)2361 intel_ntb_link_disable(device_t dev)
2362 {
2363 struct ntb_softc *ntb = device_get_softc(dev);
2364 uint32_t cntl;
2365
2366 intel_ntb_printf(2, "%s\n", __func__);
2367
2368 if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2369 ntb_link_event(dev);
2370 return (0);
2371 }
2372
2373 cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2374 cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
2375 cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
2376 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2377 cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
2378 cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
2379 intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2380 return (0);
2381 }
2382
2383 static bool
intel_ntb_link_enabled(device_t dev)2384 intel_ntb_link_enabled(device_t dev)
2385 {
2386 struct ntb_softc *ntb = device_get_softc(dev);
2387 uint32_t cntl;
2388
2389 if (ntb->type == NTB_ATOM) {
2390 cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
2391 return ((cntl & ATOM_PPD_INIT_LINK) != 0);
2392 }
2393
2394 if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2395 return (true);
2396
2397 cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2398 return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
2399 }
2400
2401 static void
recover_atom_link(void * arg)2402 recover_atom_link(void *arg)
2403 {
2404 struct ntb_softc *ntb = arg;
2405 unsigned speed, width, oldspeed, oldwidth;
2406 uint32_t status32;
2407
2408 atom_perform_link_restart(ntb);
2409
2410 /*
2411 * There is a potential race between the 2 NTB devices recovering at
2412 * the same time. If the times are the same, the link will not recover
2413 * and the driver will be stuck in this loop forever. Add a random
2414 * interval to the recovery time to prevent this race.
2415 */
2416 status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
2417 pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);
2418
2419 if (atom_link_is_err(ntb))
2420 goto retry;
2421
2422 status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2423 if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
2424 goto out;
2425
2426 status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2427 width = NTB_LNK_STA_WIDTH(status32);
2428 speed = status32 & NTB_LINK_SPEED_MASK;
2429
2430 oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
2431 oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
2432 if (oldwidth != width || oldspeed != speed)
2433 goto retry;
2434
2435 out:
2436 callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
2437 ntb);
2438 return;
2439
2440 retry:
2441 callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
2442 ntb);
2443 }
2444
2445 /*
2446 * Polls the HW link status register(s); returns true if something has changed.
2447 */
2448 static bool
intel_ntb_poll_link(struct ntb_softc * ntb)2449 intel_ntb_poll_link(struct ntb_softc *ntb)
2450 {
2451 uint32_t ntb_cntl;
2452 uint16_t reg_val;
2453
2454 if (ntb->type == NTB_ATOM) {
2455 ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2456 if (ntb_cntl == ntb->ntb_ctl)
2457 return (false);
2458
2459 ntb->ntb_ctl = ntb_cntl;
2460 ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2461 } else {
2462 if (ntb->type == NTB_XEON_GEN1)
2463 db_iowrite_raw(ntb, ntb->self_reg->db_bell,
2464 ntb->db_link_mask);
2465
2466 reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
2467 if (reg_val == ntb->lnk_sta)
2468 return (false);
2469
2470 ntb->lnk_sta = reg_val;
2471
2472 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2473 if (_xeon_link_is_up(ntb)) {
2474 if (!ntb->peer_msix_good) {
2475 callout_reset(&ntb->peer_msix_work, 0,
2476 intel_ntb_exchange_msix, ntb);
2477 return (false);
2478 }
2479 } else {
2480 ntb->peer_msix_good = false;
2481 ntb->peer_msix_done = false;
2482 }
2483 }
2484 }
2485 return (true);
2486 }
2487
2488 static inline enum ntb_speed
intel_ntb_link_sta_speed(struct ntb_softc * ntb)2489 intel_ntb_link_sta_speed(struct ntb_softc *ntb)
2490 {
2491
2492 if (!link_is_up(ntb))
2493 return (NTB_SPEED_NONE);
2494 return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
2495 }
2496
2497 static inline enum ntb_width
intel_ntb_link_sta_width(struct ntb_softc * ntb)2498 intel_ntb_link_sta_width(struct ntb_softc *ntb)
2499 {
2500
2501 if (!link_is_up(ntb))
2502 return (NTB_WIDTH_NONE);
2503 return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
2504 }
2505
2506 SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
2507 "Driver state, statistics, and HW registers");
2508
2509 #define NTB_REGSZ_MASK (3ul << 30)
2510 #define NTB_REG_64 (1ul << 30)
2511 #define NTB_REG_32 (2ul << 30)
2512 #define NTB_REG_16 (3ul << 30)
2513 #define NTB_REG_8 (0ul << 30)
2514
2515 #define NTB_DB_READ (1ul << 29)
2516 #define NTB_PCI_REG (1ul << 28)
2517 #define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
2518
2519 static void
intel_ntb_sysctl_init(struct ntb_softc * ntb)2520 intel_ntb_sysctl_init(struct ntb_softc *ntb)
2521 {
2522 struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
2523 struct sysctl_ctx_list *ctx;
2524 struct sysctl_oid *tree, *tmptree;
2525
2526 ctx = device_get_sysctl_ctx(ntb->device);
2527 globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
2528
2529 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status",
2530 CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2531 sysctl_handle_link_status_human, "A",
2532 "Link status (human readable)");
2533 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active",
2534 CTLFLAG_RD | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2535 sysctl_handle_link_status, "IU",
2536 "Link status (1=active, 0=inactive)");
2537 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up",
2538 CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2539 sysctl_handle_link_admin, "IU",
2540 "Set/get interface status (1=UP, 0=DOWN)");
2541
2542 tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info",
2543 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2544 "Driver state, statistics, and HW registers");
2545 tree_par = SYSCTL_CHILDREN(tree);
2546
2547 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD,
2548 &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port");
2549 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD,
2550 &ntb->dev_type, 0, "0 - USD; 1 - DSD");
2551 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD,
2552 &ntb->ppd, 0, "Raw PPD register (cached)");
2553
2554 if (ntb->b2b_mw_idx != B2B_MW_DISABLED) {
2555 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD,
2556 &ntb->b2b_mw_idx, 0,
2557 "Index of the MW used for B2B remote register access");
2558 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off",
2559 CTLFLAG_RD, &ntb->b2b_off,
2560 "If non-zero, offset of B2B register region in shared MW");
2561 }
2562
2563 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features",
2564 CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2565 sysctl_handle_features, "A", "Features/errata of this NTB device");
2566
2567 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD,
2568 __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0,
2569 "NTB CTL register (cached)");
2570 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD,
2571 __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0,
2572 "LNK STA register (cached)");
2573
2574 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD,
2575 &ntb->mw_count, 0, "MW count");
2576 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD,
2577 &ntb->spad_count, 0, "Scratchpad count");
2578 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD,
2579 &ntb->db_count, 0, "Doorbell count");
2580 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD,
2581 &ntb->db_vec_count, 0, "Doorbell vector count");
2582 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD,
2583 &ntb->db_vec_shift, 0, "Doorbell vector shift");
2584
2585 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD,
2586 &ntb->db_valid_mask, "Doorbell valid mask");
2587 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD,
2588 &ntb->db_link_mask, "Doorbell link mask");
2589 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD,
2590 &ntb->db_mask, "Doorbell mask (cached)");
2591
2592 tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers",
2593 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2594 "Raw HW registers (big-endian)");
2595 regpar = SYSCTL_CHILDREN(tmptree);
2596
2597 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl",
2598 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2599 NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU",
2600 "NTB Control register");
2601 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap",
2602 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2603 NTB_REG_32 | 0x19c, sysctl_handle_register, "IU",
2604 "NTB Link Capabilities");
2605 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon",
2606 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2607 NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU",
2608 "NTB Link Control register");
2609
2610 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask",
2611 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2612 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask,
2613 sysctl_handle_register, "QU", "Doorbell mask register");
2614 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell",
2615 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2616 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell,
2617 sysctl_handle_register, "QU", "Doorbell register");
2618
2619 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23",
2620 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2621 NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
2622 sysctl_handle_register, "QU", "Incoming XLAT23 register");
2623 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2624 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
2625 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2626 NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
2627 sysctl_handle_register, "IU", "Incoming XLAT4 register");
2628 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5",
2629 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2630 NTB_REG_32 | ntb->xlat_reg->bar5_xlat,
2631 sysctl_handle_register, "IU", "Incoming XLAT5 register");
2632 } else {
2633 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45",
2634 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2635 NTB_REG_64 | ntb->xlat_reg->bar4_xlat,
2636 sysctl_handle_register, "QU", "Incoming XLAT45 register");
2637 }
2638
2639 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23",
2640 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2641 NTB_REG_64 | ntb->xlat_reg->bar2_limit,
2642 sysctl_handle_register, "QU", "Incoming LMT23 register");
2643 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2644 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
2645 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2646 NTB_REG_32 | ntb->xlat_reg->bar4_limit,
2647 sysctl_handle_register, "IU", "Incoming LMT4 register");
2648 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5",
2649 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2650 NTB_REG_32 | ntb->xlat_reg->bar5_limit,
2651 sysctl_handle_register, "IU", "Incoming LMT5 register");
2652 } else {
2653 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45",
2654 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2655 NTB_REG_64 | ntb->xlat_reg->bar4_limit,
2656 sysctl_handle_register, "QU", "Incoming LMT45 register");
2657 }
2658
2659 if (ntb->type == NTB_ATOM)
2660 return;
2661
2662 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats",
2663 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW statistics");
2664 statpar = SYSCTL_CHILDREN(tmptree);
2665 SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss",
2666 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2667 NTB_REG_16 | XEON_USMEMMISS_OFFSET,
2668 sysctl_handle_register, "SU", "Upstream Memory Miss");
2669
2670 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err",
2671 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW errors");
2672 errpar = SYSCTL_CHILDREN(tmptree);
2673
2674 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd",
2675 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2676 NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET,
2677 sysctl_handle_register, "CU", "PPD");
2678
2679 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz",
2680 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2681 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET,
2682 sysctl_handle_register, "CU", "PBAR23 SZ (log2)");
2683 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz",
2684 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2685 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET,
2686 sysctl_handle_register, "CU", "PBAR4 SZ (log2)");
2687 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz",
2688 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2689 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET,
2690 sysctl_handle_register, "CU", "PBAR5 SZ (log2)");
2691
2692 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz",
2693 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2694 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET,
2695 sysctl_handle_register, "CU", "SBAR23 SZ (log2)");
2696 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz",
2697 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2698 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET,
2699 sysctl_handle_register, "CU", "SBAR4 SZ (log2)");
2700 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz",
2701 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2702 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET,
2703 sysctl_handle_register, "CU", "SBAR5 SZ (log2)");
2704
2705 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts",
2706 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2707 NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET,
2708 sysctl_handle_register, "SU", "DEVSTS");
2709 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts",
2710 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2711 NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET,
2712 sysctl_handle_register, "SU", "LNKSTS");
2713 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts",
2714 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2715 NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET,
2716 sysctl_handle_register, "SU", "SLNKSTS");
2717
2718 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts",
2719 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2720 NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET,
2721 sysctl_handle_register, "IU", "UNCERRSTS");
2722 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts",
2723 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2724 NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET,
2725 sysctl_handle_register, "IU", "CORERRSTS");
2726
2727 if (ntb->conn_type != NTB_CONN_B2B)
2728 return;
2729
2730 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01l",
2731 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2732 NTB_REG_32 | XEON_B2B_XLAT_OFFSETL,
2733 sysctl_handle_register, "IU", "Outgoing XLAT0L register");
2734 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01u",
2735 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2736 NTB_REG_32 | XEON_B2B_XLAT_OFFSETU,
2737 sysctl_handle_register, "IU", "Outgoing XLAT0U register");
2738 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23",
2739 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2740 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
2741 sysctl_handle_register, "QU", "Outgoing XLAT23 register");
2742 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2743 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
2744 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2745 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2746 sysctl_handle_register, "IU", "Outgoing XLAT4 register");
2747 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5",
2748 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2749 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off,
2750 sysctl_handle_register, "IU", "Outgoing XLAT5 register");
2751 } else {
2752 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45",
2753 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2754 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2755 sysctl_handle_register, "QU", "Outgoing XLAT45 register");
2756 }
2757
2758 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23",
2759 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2760 NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
2761 sysctl_handle_register, "QU", "Outgoing LMT23 register");
2762 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2763 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
2764 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2765 NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
2766 sysctl_handle_register, "IU", "Outgoing LMT4 register");
2767 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5",
2768 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2769 NTB_REG_32 | XEON_PBAR5LMT_OFFSET,
2770 sysctl_handle_register, "IU", "Outgoing LMT5 register");
2771 } else {
2772 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45",
2773 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2774 NTB_REG_64 | XEON_PBAR4LMT_OFFSET,
2775 sysctl_handle_register, "QU", "Outgoing LMT45 register");
2776 }
2777
2778 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base",
2779 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2780 NTB_REG_64 | ntb->xlat_reg->bar0_base,
2781 sysctl_handle_register, "QU", "Secondary BAR01 base register");
2782 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base",
2783 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2784 NTB_REG_64 | ntb->xlat_reg->bar2_base,
2785 sysctl_handle_register, "QU", "Secondary BAR23 base register");
2786 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2787 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
2788 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2789 NTB_REG_32 | ntb->xlat_reg->bar4_base,
2790 sysctl_handle_register, "IU",
2791 "Secondary BAR4 base register");
2792 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base",
2793 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2794 NTB_REG_32 | ntb->xlat_reg->bar5_base,
2795 sysctl_handle_register, "IU",
2796 "Secondary BAR5 base register");
2797 } else {
2798 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
2799 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2800 NTB_REG_64 | ntb->xlat_reg->bar4_base,
2801 sysctl_handle_register, "QU",
2802 "Secondary BAR45 base register");
2803 }
2804 }
2805
2806 static int
sysctl_handle_features(SYSCTL_HANDLER_ARGS)2807 sysctl_handle_features(SYSCTL_HANDLER_ARGS)
2808 {
2809 struct ntb_softc *ntb = arg1;
2810 struct sbuf sb;
2811 int error;
2812
2813 sbuf_new_for_sysctl(&sb, NULL, 256, req);
2814
2815 sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
2816 error = sbuf_finish(&sb);
2817 sbuf_delete(&sb);
2818
2819 if (error || !req->newptr)
2820 return (error);
2821 return (EINVAL);
2822 }
2823
2824 static int
sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)2825 sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
2826 {
2827 struct ntb_softc *ntb = arg1;
2828 unsigned old, new;
2829 int error;
2830
2831 old = intel_ntb_link_enabled(ntb->device);
2832
2833 error = SYSCTL_OUT(req, &old, sizeof(old));
2834 if (error != 0 || req->newptr == NULL)
2835 return (error);
2836
2837 error = SYSCTL_IN(req, &new, sizeof(new));
2838 if (error != 0)
2839 return (error);
2840
2841 intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
2842 (new != 0)? "en" : "dis");
2843
2844 if (new != 0)
2845 error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
2846 else
2847 error = intel_ntb_link_disable(ntb->device);
2848 return (error);
2849 }
2850
2851 static int
sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)2852 sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
2853 {
2854 struct ntb_softc *ntb = arg1;
2855 struct sbuf sb;
2856 enum ntb_speed speed;
2857 enum ntb_width width;
2858 int error;
2859
2860 sbuf_new_for_sysctl(&sb, NULL, 32, req);
2861
2862 if (intel_ntb_link_is_up(ntb->device, &speed, &width))
2863 sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
2864 (unsigned)speed, (unsigned)width);
2865 else
2866 sbuf_printf(&sb, "down");
2867
2868 error = sbuf_finish(&sb);
2869 sbuf_delete(&sb);
2870
2871 if (error || !req->newptr)
2872 return (error);
2873 return (EINVAL);
2874 }
2875
2876 static int
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)2877 sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
2878 {
2879 struct ntb_softc *ntb = arg1;
2880 unsigned res;
2881 int error;
2882
2883 res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
2884
2885 error = SYSCTL_OUT(req, &res, sizeof(res));
2886 if (error || !req->newptr)
2887 return (error);
2888 return (EINVAL);
2889 }
2890
2891 static int
sysctl_handle_register(SYSCTL_HANDLER_ARGS)2892 sysctl_handle_register(SYSCTL_HANDLER_ARGS)
2893 {
2894 struct ntb_softc *ntb;
2895 const void *outp;
2896 uintptr_t sz;
2897 uint64_t umv;
2898 char be[sizeof(umv)];
2899 size_t outsz;
2900 uint32_t reg;
2901 bool db, pci;
2902 int error;
2903
2904 ntb = arg1;
2905 reg = arg2 & ~NTB_REGFLAGS_MASK;
2906 sz = arg2 & NTB_REGSZ_MASK;
2907 db = (arg2 & NTB_DB_READ) != 0;
2908 pci = (arg2 & NTB_PCI_REG) != 0;
2909
2910 KASSERT(!(db && pci), ("bogus"));
2911
2912 if (db) {
2913 KASSERT(sz == NTB_REG_64, ("bogus"));
2914 umv = db_ioread(ntb, reg);
2915 outsz = sizeof(uint64_t);
2916 } else {
2917 switch (sz) {
2918 case NTB_REG_64:
2919 if (pci)
2920 umv = pci_read_config(ntb->device, reg, 8);
2921 else
2922 umv = intel_ntb_reg_read(8, reg);
2923 outsz = sizeof(uint64_t);
2924 break;
2925 case NTB_REG_32:
2926 if (pci)
2927 umv = pci_read_config(ntb->device, reg, 4);
2928 else
2929 umv = intel_ntb_reg_read(4, reg);
2930 outsz = sizeof(uint32_t);
2931 break;
2932 case NTB_REG_16:
2933 if (pci)
2934 umv = pci_read_config(ntb->device, reg, 2);
2935 else
2936 umv = intel_ntb_reg_read(2, reg);
2937 outsz = sizeof(uint16_t);
2938 break;
2939 case NTB_REG_8:
2940 if (pci)
2941 umv = pci_read_config(ntb->device, reg, 1);
2942 else
2943 umv = intel_ntb_reg_read(1, reg);
2944 outsz = sizeof(uint8_t);
2945 break;
2946 default:
2947 panic("bogus");
2948 break;
2949 }
2950 }
2951
2952 /* Encode bigendian so that sysctl -x is legible. */
2953 be64enc(be, umv);
2954 outp = ((char *)be) + sizeof(umv) - outsz;
2955
2956 error = SYSCTL_OUT(req, outp, outsz);
2957 if (error || !req->newptr)
2958 return (error);
2959 return (EINVAL);
2960 }
2961
2962 static unsigned
intel_ntb_user_mw_to_idx(struct ntb_softc * ntb,unsigned uidx)2963 intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
2964 {
2965
2966 if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2967 uidx >= ntb->b2b_mw_idx) ||
2968 (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2969 uidx++;
2970 if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2971 uidx >= ntb->b2b_mw_idx) &&
2972 (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2973 uidx++;
2974 return (uidx);
2975 }
2976
2977 #ifndef EARLY_AP_STARTUP
2978 static int msix_ready;
2979
2980 static void
intel_ntb_msix_ready(void * arg __unused)2981 intel_ntb_msix_ready(void *arg __unused)
2982 {
2983
2984 msix_ready = 1;
2985 }
2986 SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
2987 intel_ntb_msix_ready, NULL);
2988 #endif
2989
2990 static void
intel_ntb_exchange_msix(void * ctx)2991 intel_ntb_exchange_msix(void *ctx)
2992 {
2993 struct ntb_softc *ntb;
2994 uint32_t val;
2995 unsigned i;
2996
2997 ntb = ctx;
2998
2999 if (ntb->peer_msix_good)
3000 goto msix_good;
3001 if (ntb->peer_msix_done)
3002 goto msix_done;
3003
3004 #ifndef EARLY_AP_STARTUP
3005 /* Block MSIX negotiation until SMP started and IRQ reshuffled. */
3006 if (!msix_ready)
3007 goto reschedule;
3008 #endif
3009
3010 intel_ntb_get_msix_info(ntb);
3011 for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3012 intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
3013 ntb->msix_data[i].nmd_data);
3014 intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
3015 ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
3016 }
3017 intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
3018
3019 intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
3020 if (val != NTB_MSIX_VER_GUARD)
3021 goto reschedule;
3022
3023 for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3024 intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
3025 intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
3026 ntb->peer_msix_data[i].nmd_data = val;
3027 intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
3028 intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
3029 ntb->peer_msix_data[i].nmd_ofs = val;
3030 }
3031
3032 ntb->peer_msix_done = true;
3033
3034 msix_done:
3035 intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
3036 intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
3037 if (val != NTB_MSIX_RECEIVED)
3038 goto reschedule;
3039
3040 intel_ntb_spad_clear(ntb->device);
3041 ntb->peer_msix_good = true;
3042 /* Give peer time to see our NTB_MSIX_RECEIVED. */
3043 goto reschedule;
3044
3045 msix_good:
3046 intel_ntb_poll_link(ntb);
3047 ntb_link_event(ntb->device);
3048 return;
3049
3050 reschedule:
3051 ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
3052 if (_xeon_link_is_up(ntb)) {
3053 callout_reset(&ntb->peer_msix_work,
3054 hz * (ntb->peer_msix_good ? 2 : 1) / 10,
3055 intel_ntb_exchange_msix, ntb);
3056 } else
3057 intel_ntb_spad_clear(ntb->device);
3058 }
3059
3060 /*
3061 * Public API to the rest of the OS
3062 */
3063
3064 static uint8_t
intel_ntb_spad_count(device_t dev)3065 intel_ntb_spad_count(device_t dev)
3066 {
3067 struct ntb_softc *ntb = device_get_softc(dev);
3068
3069 return (ntb->spad_count);
3070 }
3071
3072 static uint8_t
intel_ntb_mw_count(device_t dev)3073 intel_ntb_mw_count(device_t dev)
3074 {
3075 struct ntb_softc *ntb = device_get_softc(dev);
3076 uint8_t res;
3077
3078 res = ntb->mw_count;
3079 if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0)
3080 res--;
3081 if (ntb->msix_mw_idx != B2B_MW_DISABLED)
3082 res--;
3083 return (res);
3084 }
3085
3086 static int
intel_ntb_spad_write(device_t dev,unsigned int idx,uint32_t val)3087 intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
3088 {
3089 struct ntb_softc *ntb = device_get_softc(dev);
3090
3091 if (idx >= ntb->spad_count)
3092 return (EINVAL);
3093
3094 intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
3095
3096 return (0);
3097 }
3098
3099 /*
3100 * Zeros the local scratchpad.
3101 */
3102 static void
intel_ntb_spad_clear(device_t dev)3103 intel_ntb_spad_clear(device_t dev)
3104 {
3105 struct ntb_softc *ntb = device_get_softc(dev);
3106 unsigned i;
3107
3108 for (i = 0; i < ntb->spad_count; i++)
3109 intel_ntb_spad_write(dev, i, 0);
3110 }
3111
3112 static int
intel_ntb_spad_read(device_t dev,unsigned int idx,uint32_t * val)3113 intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3114 {
3115 struct ntb_softc *ntb = device_get_softc(dev);
3116
3117 if (idx >= ntb->spad_count)
3118 return (EINVAL);
3119
3120 *val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
3121
3122 return (0);
3123 }
3124
3125 static int
intel_ntb_peer_spad_write(device_t dev,unsigned int idx,uint32_t val)3126 intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
3127 {
3128 struct ntb_softc *ntb = device_get_softc(dev);
3129
3130 if (idx >= ntb->spad_count)
3131 return (EINVAL);
3132
3133 if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3134 intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
3135 else
3136 intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
3137
3138 return (0);
3139 }
3140
3141 static int
intel_ntb_peer_spad_read(device_t dev,unsigned int idx,uint32_t * val)3142 intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3143 {
3144 struct ntb_softc *ntb = device_get_softc(dev);
3145
3146 if (idx >= ntb->spad_count)
3147 return (EINVAL);
3148
3149 if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3150 *val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
3151 else
3152 *val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
3153
3154 return (0);
3155 }
3156
3157 static int
intel_ntb_mw_get_range(device_t dev,unsigned mw_idx,vm_paddr_t * base,caddr_t * vbase,size_t * size,size_t * align,size_t * align_size,bus_addr_t * plimit)3158 intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
3159 caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
3160 bus_addr_t *plimit)
3161 {
3162 struct ntb_softc *ntb = device_get_softc(dev);
3163 struct ntb_pci_bar_info *bar;
3164 bus_addr_t limit;
3165 size_t bar_b2b_off;
3166 enum ntb_bar bar_num;
3167
3168 if (mw_idx >= intel_ntb_mw_count(dev))
3169 return (EINVAL);
3170 mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
3171
3172 bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
3173 bar = &ntb->bar_info[bar_num];
3174 bar_b2b_off = 0;
3175 if (mw_idx == ntb->b2b_mw_idx) {
3176 KASSERT(ntb->b2b_off != 0,
3177 ("user shouldn't get non-shared b2b mw"));
3178 bar_b2b_off = ntb->b2b_off;
3179 }
3180
3181 if (bar_is_64bit(ntb, bar_num))
3182 limit = BUS_SPACE_MAXADDR;
3183 else
3184 limit = BUS_SPACE_MAXADDR_32BIT;
3185
3186 if (base != NULL)
3187 *base = bar->pbase + bar_b2b_off;
3188 if (vbase != NULL)
3189 *vbase = bar->vbase + bar_b2b_off;
3190 if (size != NULL)
3191 *size = bar->size - bar_b2b_off;
3192 if (align != NULL)
3193 *align = bar->size;
3194 if (align_size != NULL)
3195 *align_size = 1;
3196 if (plimit != NULL)
3197 *plimit = limit;
3198 return (0);
3199 }
3200
3201 static int
intel_ntb_mw_set_trans(device_t dev,unsigned idx,bus_addr_t addr,size_t size)3202 intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
3203 {
3204 struct ntb_softc *ntb = device_get_softc(dev);
3205 struct ntb_pci_bar_info *bar;
3206 uint64_t base, limit, reg_val;
3207 size_t bar_size, mw_size;
3208 uint32_t base_reg, xlat_reg, limit_reg;
3209 enum ntb_bar bar_num;
3210
3211 if (idx >= intel_ntb_mw_count(dev))
3212 return (EINVAL);
3213 idx = intel_ntb_user_mw_to_idx(ntb, idx);
3214
3215 bar_num = intel_ntb_mw_to_bar(ntb, idx);
3216 bar = &ntb->bar_info[bar_num];
3217
3218 bar_size = bar->size;
3219 if (idx == ntb->b2b_mw_idx)
3220 mw_size = bar_size - ntb->b2b_off;
3221 else
3222 mw_size = bar_size;
3223
3224 /* Hardware requires that addr is aligned to bar size */
3225 if ((addr & (bar_size - 1)) != 0)
3226 return (EINVAL);
3227
3228 if (size > mw_size)
3229 return (EINVAL);
3230
3231 bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);
3232
3233 limit = 0;
3234 if (bar_is_64bit(ntb, bar_num)) {
3235 if (ntb->type == NTB_XEON_GEN3)
3236 base = addr;
3237 else
3238 base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
3239
3240 if (limit_reg != 0 && size != mw_size)
3241 limit = base + size;
3242 else
3243 limit = base + mw_size;
3244
3245 /* Set and verify translation address */
3246 intel_ntb_reg_write(8, xlat_reg, addr);
3247 reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
3248 if (reg_val != addr) {
3249 intel_ntb_reg_write(8, xlat_reg, 0);
3250 return (EIO);
3251 }
3252
3253 /* Set and verify the limit */
3254 intel_ntb_reg_write(8, limit_reg, limit);
3255 reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
3256 if (reg_val != limit) {
3257 intel_ntb_reg_write(8, limit_reg, base);
3258 intel_ntb_reg_write(8, xlat_reg, 0);
3259 return (EIO);
3260 }
3261 } else {
3262 /* Configure 32-bit (split) BAR MW */
3263 if (ntb->type == NTB_XEON_GEN3)
3264 return (EIO);
3265
3266 if ((addr & UINT32_MAX) != addr)
3267 return (ERANGE);
3268 if (((addr + size) & UINT32_MAX) != (addr + size))
3269 return (ERANGE);
3270
3271 base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
3272
3273 if (limit_reg != 0 && size != mw_size)
3274 limit = base + size;
3275
3276 /* Set and verify translation address */
3277 intel_ntb_reg_write(4, xlat_reg, addr);
3278 reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
3279 if (reg_val != addr) {
3280 intel_ntb_reg_write(4, xlat_reg, 0);
3281 return (EIO);
3282 }
3283
3284 /* Set and verify the limit */
3285 intel_ntb_reg_write(4, limit_reg, limit);
3286 reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
3287 if (reg_val != limit) {
3288 intel_ntb_reg_write(4, limit_reg, base);
3289 intel_ntb_reg_write(4, xlat_reg, 0);
3290 return (EIO);
3291 }
3292 }
3293 return (0);
3294 }
3295
3296 static int
intel_ntb_mw_clear_trans(device_t dev,unsigned mw_idx)3297 intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
3298 {
3299
3300 return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
3301 }
3302
3303 static int
intel_ntb_mw_get_wc(device_t dev,unsigned idx,vm_memattr_t * mode)3304 intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
3305 {
3306 struct ntb_softc *ntb = device_get_softc(dev);
3307 struct ntb_pci_bar_info *bar;
3308
3309 if (idx >= intel_ntb_mw_count(dev))
3310 return (EINVAL);
3311 idx = intel_ntb_user_mw_to_idx(ntb, idx);
3312
3313 bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3314 *mode = bar->map_mode;
3315 return (0);
3316 }
3317
3318 static int
intel_ntb_mw_set_wc(device_t dev,unsigned idx,vm_memattr_t mode)3319 intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
3320 {
3321 struct ntb_softc *ntb = device_get_softc(dev);
3322
3323 if (idx >= intel_ntb_mw_count(dev))
3324 return (EINVAL);
3325
3326 idx = intel_ntb_user_mw_to_idx(ntb, idx);
3327 return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
3328 }
3329
3330 static int
intel_ntb_mw_set_wc_internal(struct ntb_softc * ntb,unsigned idx,vm_memattr_t mode)3331 intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
3332 {
3333 struct ntb_pci_bar_info *bar;
3334 int rc;
3335
3336 bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3337 if (bar->map_mode == mode)
3338 return (0);
3339
3340 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
3341 if (rc == 0)
3342 bar->map_mode = mode;
3343
3344 return (rc);
3345 }
3346
3347 static void
intel_ntb_peer_db_set(device_t dev,uint64_t bits)3348 intel_ntb_peer_db_set(device_t dev, uint64_t bits)
3349 {
3350 struct ntb_softc *ntb = device_get_softc(dev);
3351 uint64_t db;
3352
3353 if ((bits & ~ntb->db_valid_mask) != 0) {
3354 device_printf(ntb->device, "Invalid doorbell bits %#jx\n",
3355 (uintmax_t)bits);
3356 return;
3357 }
3358
3359 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
3360 struct ntb_pci_bar_info *lapic;
3361 unsigned i;
3362
3363 lapic = ntb->peer_lapic_bar;
3364
3365 for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3366 if ((bits & intel_ntb_db_vector_mask(dev, i)) != 0)
3367 bus_space_write_4(lapic->pci_bus_tag,
3368 lapic->pci_bus_handle,
3369 ntb->peer_msix_data[i].nmd_ofs,
3370 ntb->peer_msix_data[i].nmd_data);
3371 }
3372 return;
3373 }
3374
3375 if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
3376 intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bits);
3377 return;
3378 }
3379
3380 if (ntb->type == NTB_XEON_GEN3) {
3381 while (bits != 0) {
3382 db = ffsll(bits);
3383
3384 intel_ntb_reg_write(1,
3385 ntb->peer_reg->db_bell + (db - 1) * 4, 0x1);
3386
3387 bits = bits & (bits - 1);
3388 }
3389 } else {
3390 db_iowrite(ntb, ntb->peer_reg->db_bell, bits);
3391 }
3392 }
3393
3394 static int
intel_ntb_peer_db_addr(device_t dev,bus_addr_t * db_addr,vm_size_t * db_size)3395 intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
3396 {
3397 struct ntb_softc *ntb = device_get_softc(dev);
3398 struct ntb_pci_bar_info *bar;
3399 uint64_t regoff;
3400
3401 KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));
3402
3403 if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
3404 bar = &ntb->bar_info[NTB_CONFIG_BAR];
3405 regoff = ntb->peer_reg->db_bell;
3406 } else {
3407 KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
3408 ("invalid b2b idx"));
3409
3410 bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
3411 regoff = XEON_PDOORBELL_OFFSET;
3412 }
3413 KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));
3414
3415 /* HACK: Specific to current x86 bus implementation. */
3416 *db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
3417 *db_size = ntb->reg->db_size;
3418 return (0);
3419 }
3420
3421 static uint64_t
intel_ntb_db_valid_mask(device_t dev)3422 intel_ntb_db_valid_mask(device_t dev)
3423 {
3424 struct ntb_softc *ntb = device_get_softc(dev);
3425
3426 return (ntb->db_valid_mask);
3427 }
3428
3429 static int
intel_ntb_db_vector_count(device_t dev)3430 intel_ntb_db_vector_count(device_t dev)
3431 {
3432 struct ntb_softc *ntb = device_get_softc(dev);
3433
3434 return (ntb->db_vec_count);
3435 }
3436
3437 static uint64_t
intel_ntb_db_vector_mask(device_t dev,uint32_t vector)3438 intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
3439 {
3440 struct ntb_softc *ntb = device_get_softc(dev);
3441
3442 if (vector > ntb->db_vec_count)
3443 return (0);
3444 return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
3445 }
3446
3447 static bool
intel_ntb_link_is_up(device_t dev,enum ntb_speed * speed,enum ntb_width * width)3448 intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
3449 {
3450 struct ntb_softc *ntb = device_get_softc(dev);
3451
3452 if (speed != NULL)
3453 *speed = intel_ntb_link_sta_speed(ntb);
3454 if (width != NULL)
3455 *width = intel_ntb_link_sta_width(ntb);
3456 return (link_is_up(ntb));
3457 }
3458
3459 static void
save_bar_parameters(struct ntb_pci_bar_info * bar)3460 save_bar_parameters(struct ntb_pci_bar_info *bar)
3461 {
3462
3463 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
3464 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
3465 bar->pbase = rman_get_start(bar->pci_resource);
3466 bar->size = rman_get_size(bar->pci_resource);
3467 bar->vbase = rman_get_virtual(bar->pci_resource);
3468 }
3469
3470 static device_method_t ntb_intel_methods[] = {
3471 /* Device interface */
3472 DEVMETHOD(device_probe, intel_ntb_probe),
3473 DEVMETHOD(device_attach, intel_ntb_attach),
3474 DEVMETHOD(device_detach, intel_ntb_detach),
3475 /* Bus interface */
3476 DEVMETHOD(bus_child_location, ntb_child_location),
3477 DEVMETHOD(bus_print_child, ntb_print_child),
3478 DEVMETHOD(bus_get_dma_tag, ntb_get_dma_tag),
3479 /* NTB interface */
3480 DEVMETHOD(ntb_port_number, intel_ntb_port_number),
3481 DEVMETHOD(ntb_peer_port_count, intel_ntb_peer_port_count),
3482 DEVMETHOD(ntb_peer_port_number, intel_ntb_peer_port_number),
3483 DEVMETHOD(ntb_peer_port_idx, intel_ntb_peer_port_idx),
3484 DEVMETHOD(ntb_link_is_up, intel_ntb_link_is_up),
3485 DEVMETHOD(ntb_link_enable, intel_ntb_link_enable),
3486 DEVMETHOD(ntb_link_disable, intel_ntb_link_disable),
3487 DEVMETHOD(ntb_link_enabled, intel_ntb_link_enabled),
3488 DEVMETHOD(ntb_mw_count, intel_ntb_mw_count),
3489 DEVMETHOD(ntb_mw_get_range, intel_ntb_mw_get_range),
3490 DEVMETHOD(ntb_mw_set_trans, intel_ntb_mw_set_trans),
3491 DEVMETHOD(ntb_mw_clear_trans, intel_ntb_mw_clear_trans),
3492 DEVMETHOD(ntb_mw_get_wc, intel_ntb_mw_get_wc),
3493 DEVMETHOD(ntb_mw_set_wc, intel_ntb_mw_set_wc),
3494 DEVMETHOD(ntb_spad_count, intel_ntb_spad_count),
3495 DEVMETHOD(ntb_spad_clear, intel_ntb_spad_clear),
3496 DEVMETHOD(ntb_spad_write, intel_ntb_spad_write),
3497 DEVMETHOD(ntb_spad_read, intel_ntb_spad_read),
3498 DEVMETHOD(ntb_peer_spad_write, intel_ntb_peer_spad_write),
3499 DEVMETHOD(ntb_peer_spad_read, intel_ntb_peer_spad_read),
3500 DEVMETHOD(ntb_db_valid_mask, intel_ntb_db_valid_mask),
3501 DEVMETHOD(ntb_db_vector_count, intel_ntb_db_vector_count),
3502 DEVMETHOD(ntb_db_vector_mask, intel_ntb_db_vector_mask),
3503 DEVMETHOD(ntb_db_clear, intel_ntb_db_clear),
3504 DEVMETHOD(ntb_db_clear_mask, intel_ntb_db_clear_mask),
3505 DEVMETHOD(ntb_db_read, intel_ntb_db_read),
3506 DEVMETHOD(ntb_db_set_mask, intel_ntb_db_set_mask),
3507 DEVMETHOD(ntb_peer_db_addr, intel_ntb_peer_db_addr),
3508 DEVMETHOD(ntb_peer_db_set, intel_ntb_peer_db_set),
3509 DEVMETHOD_END
3510 };
3511
3512 static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
3513 sizeof(struct ntb_softc));
3514 DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, NULL, NULL);
3515 MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1);
3516 MODULE_VERSION(ntb_hw_intel, 1);
3517 MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids,
3518 nitems(pci_ids));
3519