/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <rte_io.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>		/* rte_zmalloc() */
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb.h"
#include "ntb_hw_intel.h"

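/* The Xeon NTB exposes its memory windows through the 64-bit BAR2/3 and
 * BAR4/5 pairs.
 */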
enum xeon_ntb_bar {
	XEON_NTB_BAR23 = 2,
	XEON_NTB_BAR45 = 4,
};

static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR23,
	XEON_NTB_BAR45,
};

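/* Identify a Gen3 NTB (Skylake Xeon, SKX) by its PCI device ID. */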
static inline int
is_gen3_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SKX)
		return 1;

	return 0;
}

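/* Identify a Gen4 NTB (Ice Lake Xeon, ICX) by its PCI device ID. */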
static inline int
is_gen4_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_ICX)
		return 1;

	return 0;
}

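/*
 * Check the Gen3 PPD (PCIe Port Definition) register in PCI config space.
 * Only non-split-BAR B2B topology is supported; record whether the local
 * port is the upstream (USD) or downstream (DSD) device in hw->topo.
 */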
static int
intel_ntb3_check_ppd(struct ntb_hw *hw)
{
	uint8_t reg_val;
	int ret;

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Cannot get NTB PPD (PCIe port definition).");
		return -EIO;
	}

	/* Check connection topo type. Only B2B is supported. */
	switch (reg_val & XEON_PPD_CONN_MASK) {
	case XEON_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is in use.");
		break;
	case XEON_PPD_CONN_TRANSPARENT:
	case XEON_PPD_CONN_RP:
	default:
		NTB_LOG(ERR, "Unsupported connection topo. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, Downstream Device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, Upstream Device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	/* Check if BAR4 is split. Split BAR is not supported. */
	if (reg_val & XEON_PPD_SPLIT_BAR_MASK) {
		NTB_LOG(ERR, "Split BAR is not supported.");
		return -EINVAL;
	}

	return 0;
}

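/*
 * Check the Gen4 PPD register, which lives in MMIO rather than PCI config
 * space. Only B2B topology is supported; record USD/DSD in hw->topo.
 */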
static int
intel_ntb4_check_ppd(struct ntb_hw *hw)
{
	uint32_t reg_val;

	reg_val = rte_read32(hw->hw_addr + XEON_GEN4_PPD1_OFFSET);

	/* Check connection topo type. Only B2B is supported. */
	switch (reg_val & XEON_GEN4_PPD_CONN_MASK) {
	case XEON_GEN4_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is in use.");
		break;
	default:
		NTB_LOG(ERR, "Unsupported connection topo. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_GEN4_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, Downstream Device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, Upstream Device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	return 0;
}

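/*
 * Per-device init: validate the PPD, record the memory window, doorbell
 * and scratchpad counts, take each window's size from its BAR length,
 * and reserve the last two scratchpad registers for users.
 */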
static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	if (is_gen3_ntb(hw))
		ret = intel_ntb3_check_ppd(hw);
	else if (is_gen4_ntb(hw))
		/* PPD is in MMIO but not config space for NTB Gen4 */
		ret = intel_ntb4_check_ppd(hw);
	else {
		NTB_LOG(ERR, "Cannot init unsupported device.");
		return -ENOTSUP;
	}

	if (ret)
		return ret;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

	hw->mw_size = rte_zmalloc("ntb_mw_size",
				  hw->mw_cnt * sizeof(uint64_t), 0);
	if (hw->mw_size == NULL) {
		NTB_LOG(ERR, "Cannot allocate memory for mw size.");
		return -ENOMEM;
	}

	for (i = 0; i < hw->mw_cnt; i++) {
		bar = intel_ntb_bar[i];
		hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
	}

	/* Reserve the last 2 spad registers for users. */
	for (i = 0; i < NTB_SPAD_USER_MAX_NUM; i++)
		hw->spad_user_list[i] = hw->spad_cnt;
	hw->spad_user_list[0] = hw->spad_cnt - 2;
	hw->spad_user_list[1] = hw->spad_cnt - 1;

	return 0;
}

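/* Return the local virtual address of peer memory window mw_idx, i.e. the
 * mapped BAR through which the peer's memory is accessed.
 */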
static void *
intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return NULL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return NULL;
	}

	bar = intel_ntb_bar[mw_idx];

	return hw->pci_dev->mem_resource[bar].addr;
}

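/*
 * Program memory window mw_idx so that peer accesses are translated to the
 * local address 'addr'; the limit registers fence accesses beyond 'size'.
 */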
static int
intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
		       uint64_t addr, uint64_t size)
{
	struct ntb_hw *hw = dev->dev_private;
	void *xlat_addr, *limit_addr;
	uint64_t xlat_off, limit_off;
	uint64_t base, limit;
	uint8_t bar;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (mw_idx < 0 || mw_idx >= hw->mw_cnt) {
		NTB_LOG(ERR, "Invalid memory window index (0 - %u).",
			hw->mw_cnt - 1);
		return -EINVAL;
	}

	bar = intel_ntb_bar[mw_idx];

	xlat_off = XEON_IMBAR1XBASE_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_off = XEON_IMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_addr = hw->hw_addr + limit_off;

	/* Limit reg val should be EMBAR base address plus MW size. */
	base = addr;
	limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	if (is_gen3_ntb(hw)) {
		/* Set up the external (EMBAR) side so the remote can access. */
		xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
		xlat_addr = hw->hw_addr + xlat_off;
		limit_off = XEON_EMBAR1XLMT_OFFSET +
			    mw_idx * XEON_BAR_INTERVAL_OFFSET;
		limit_addr = hw->hw_addr + limit_off;
		base = rte_read64(xlat_addr);
		base &= ~0xf;
		limit = base + size;
		rte_write64(limit, limit_addr);
	} else if (is_gen4_ntb(hw)) {
		/* Set the translate base address index register. */
		xlat_off = XEON_GEN4_IM1XBASEIDX_OFFSET +
			   mw_idx * XEON_GEN4_XBASEIDX_INTERVAL;
		xlat_addr = hw->hw_addr + xlat_off;
		rte_write16(rte_log2_u64(size), xlat_addr);
	} else {
		NTB_LOG(ERR, "Cannot set translation of memory windows for unsupported device.");
		rte_write64(base, limit_addr);
		rte_write64(0, xlat_addr);
		return -ENOTSUP;
	}

	return 0;
}

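/* Translate a peer address into a local pointer within the mapped peer
 * memory windows; returns NULL if the address falls outside every window.
 */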
static void *
intel_ntb_ioremap(const struct rte_rawdev *dev, uint64_t addr)
{
	struct ntb_hw *hw = dev->dev_private;
	void *mapped = NULL;
	void *base;
	int i;

	for (i = 0; i < hw->peer_used_mws; i++) {
		if (addr >= hw->peer_mw_base[i] &&
		    addr <= hw->peer_mw_base[i] + hw->mw_size[i]) {
			base = intel_ntb_get_peer_mw_addr(dev, i);
			mapped = (void *)(size_t)(addr - hw->peer_mw_base[i] +
				 (size_t)base);
			break;
		}
	}

	return mapped;
}

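/*
 * Read the link status register (PCI config space on Gen3, MMIO on Gen4)
 * and update the link state, speed and width in hw.
 */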
static int
intel_ntb_get_link_status(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val, reg_off;
	int ret;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	if (is_gen3_ntb(hw)) {
		reg_off = XEON_GEN3_LINK_STATUS_OFFSET;
		ret = rte_pci_read_config(hw->pci_dev, &reg_val,
					  sizeof(reg_val), reg_off);
		if (ret < 0) {
			NTB_LOG(ERR, "Unable to get link status.");
			return -EIO;
		}
	} else if (is_gen4_ntb(hw)) {
		reg_off = XEON_GEN4_LINK_STATUS_OFFSET;
		reg_val = rte_read16(hw->hw_addr + reg_off);
	} else {
		NTB_LOG(ERR, "Cannot get link status for unsupported device.");
		return -ENOTSUP;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

	if (hw->link_status) {
		hw->link_speed = NTB_LNK_STA_SPEED(reg_val);
		hw->link_width = NTB_LNK_STA_WIDTH(reg_val);
	} else {
		hw->link_speed = NTB_SPEED_NONE;
		hw->link_width = NTB_WIDTH_NONE;
	}

	return 0;
}

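/* Toggle the Gen3 link via the disable/config-lock and BAR snoop bits in
 * the NTB control register.
 */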
static int
intel_ntb_gen3_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

	reg_off = XEON_NTBCNTL_OFFSET;
	reg_addr = hw->hw_addr + reg_off;
	ntb_ctrl = rte_read32(reg_addr);

	if (up) {
		ntb_ctrl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
		ntb_ctrl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	} else {
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		ntb_ctrl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	}

	rte_write32(ntb_ctrl, reg_addr);

	return 0;
}

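/*
 * Toggle the Gen4 link: program the BAR snoop bits, clear or set the
 * link-disable bit, and kick off link training when bringing the link up.
 */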
static int
intel_ntb_gen4_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, ppd0;
	uint16_t link_ctrl;
	void *reg_addr;

	if (up) {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl &= ~XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);

		/* Start link training. */
		reg_addr = hw->hw_addr + XEON_GEN4_PPD0_OFFSET;
		ppd0 = rte_read32(reg_addr);
		ppd0 |= XEON_GEN4_PPD_LINKTRN;
		rte_write32(ppd0, reg_addr);

		/* Make sure link training has started. */
		ppd0 = rte_read32(reg_addr);
		if (!(ppd0 & XEON_GEN4_PPD_LINKTRN)) {
			NTB_LOG(ERR, "Link is not training.");
			return -EINVAL;
		}
	} else {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = rte_read32(reg_addr);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl |= XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);
	}

	return 0;
}

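/* Dispatch link control to the Gen3 or Gen4 handler. */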
static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	int ret = 0;

	if (is_gen3_ntb(hw))
		ret = intel_ntb_gen3_set_link(hw, up);
	else if (is_gen4_ntb(hw))
		ret = intel_ntb_gen4_set_link(hw, up);
	else {
		NTB_LOG(ERR, "Cannot set link for unsupported device.");
		ret = -ENOTSUP;
	}

	return ret;
}

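/*
 * Read a 32-bit scratchpad register; when 'peer' is set, the peer's
 * scratchpads are read through the B2B window. Returns 0 on a bad index.
 */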
static uint32_t
intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t spad_v, reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return 0;
	}

	/* When peer is true, read peer spad reg */
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot read spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

	return spad_v;
}

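/* Write a 32-bit scratchpad register, the peer's when 'peer' is set. */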
static int
intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
		     bool peer, uint32_t spad_v)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t reg_off;
	void *reg_addr;

	if (spad < 0 || spad >= hw->spad_cnt) {
		NTB_LOG(ERR, "Invalid spad reg index.");
		return -EINVAL;
	}

	/* When peer is true, write peer spad reg */
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot write spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);

	rte_write32(spad_v, reg_addr);

	return 0;
}

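/* Return the local doorbell bits from the interrupt status register. */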
static uint64_t
intel_ntb_db_read(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off, db_bits;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	db_bits = rte_read64(db_addr);

	return db_bits;
}

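/*
 * Clear the given doorbell bits. On Gen4, the DLL state-change status in
 * SLOTSTS is cleared as well.
 */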
static int
intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_off;
	void *db_addr;

	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	if (is_gen4_ntb(hw))
		rte_write16(XEON_GEN4_SLOTSTS_DLLSCS,
			    hw->hw_addr + XEON_GEN4_SLOTSTS);
	rte_write64(db_bits, db_addr);

	return 0;
}

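/* Mask (disable) the given doorbell interrupts; masks accumulate in
 * hw->db_mask.
 */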
static int
intel_ntb_db_set_mask(const struct rte_rawdev *dev, uint64_t db_mask)
{
	struct ntb_hw *hw = dev->dev_private;
	uint64_t db_m_off;
	void *db_m_addr;

	db_m_off = XEON_IM_INT_DISABLE_OFFSET;
	db_m_addr = hw->hw_addr + db_m_off;

	db_mask |= hw->db_mask;

	rte_write64(db_mask, db_m_addr);

	hw->db_mask = db_mask;

	return 0;
}

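/* Ring doorbell db_idx on the peer by writing its doorbell register. */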
static int
intel_ntb_peer_db_set(const struct rte_rawdev *dev, uint8_t db_idx)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t db_off;
	void *db_addr;

	if (((uint64_t)1 << db_idx) & ~hw->db_valid_mask) {
		NTB_LOG(ERR, "Invalid doorbell.");
		return -EINVAL;
	}

	db_off = XEON_IM_DOORBELL_OFFSET + db_idx * 4;
	db_addr = hw->hw_addr + db_off;

	rte_write32(1, db_addr);

	return 0;
}

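/* Bind doorbell interrupt source 'intr' to MSI-X vector 'msix'. */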
static int
intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_off;
	void *reg_addr;

	if (intr >= hw->db_cnt) {
		NTB_LOG(ERR, "Invalid intr source.");
		return -EINVAL;
	}

	/* Bind intr source to msix vector */
	if (is_gen3_ntb(hw))
		reg_off = XEON_GEN3_INTVEC_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = XEON_GEN4_INTVEC_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot bind vectors for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + intr;

	rte_write8(msix, reg_addr);

	return 0;
}

/* Operations for the primary side of the local NTB. */
const struct ntb_dev_ops intel_ntb_ops = {
	.ntb_dev_init       = intel_ntb_dev_init,
	.get_peer_mw_addr   = intel_ntb_get_peer_mw_addr,
	.mw_set_trans       = intel_ntb_mw_set_trans,
	.ioremap            = intel_ntb_ioremap,
	.get_link_status    = intel_ntb_get_link_status,
	.set_link           = intel_ntb_set_link,
	.spad_read          = intel_ntb_spad_read,
	.spad_write         = intel_ntb_spad_write,
	.db_read            = intel_ntb_db_read,
	.db_clear           = intel_ntb_db_clear,
	.db_set_mask        = intel_ntb_db_set_mask,
	.peer_db_set        = intel_ntb_peer_db_set,
	.vector_bind        = intel_ntb_vector_bind,
};