/* xref: /dpdk/drivers/event/dlb2/pf/dlb2_main.c (revision d4a06a39) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <assert.h>
10 #include <unistd.h>
11 #include <string.h>
12 
13 #include <rte_malloc.h>
14 #include <rte_errno.h>
15 
16 #include "base/dlb2_regs.h"
17 #include "base/dlb2_hw_types.h"
18 #include "base/dlb2_resource.h"
19 #include "base/dlb2_osdep.h"
20 #include "dlb2_main.h"
21 #include "../dlb2_user.h"
22 #include "../dlb2_priv.h"
23 #include "../dlb2_iface.h"
24 #include "../dlb2_inline_fns.h"
25 
26 #define PF_ID_ZERO 0	/* PF ONLY! */
27 #define NO_OWNER_VF 0	/* PF ONLY! */
28 #define NOT_VF_REQ false /* PF ONLY! */
29 
30 #define DLB2_PCI_CAP_POINTER 0x34
31 #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
32 #define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
33 
34 #define DLB2_PCI_LNKCTL 16
35 #define DLB2_PCI_SLTCTL 24
36 #define DLB2_PCI_RTCTL 28
37 #define DLB2_PCI_EXP_DEVCTL2 40
38 #define DLB2_PCI_LNKCTL2 48
39 #define DLB2_PCI_SLTCTL2 56
40 #define DLB2_PCI_CMD 4
41 #define DLB2_PCI_EXP_DEVSTA 10
42 #define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
43 #define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000
44 
45 #define DLB2_PCI_CAP_ID_EXP       0x10
46 #define DLB2_PCI_CAP_ID_MSIX      0x11
47 #define DLB2_PCI_EXT_CAP_ID_PRI   0x13
48 #define DLB2_PCI_EXT_CAP_ID_ACS   0xD
49 
50 #define DLB2_PCI_PRI_CTRL_ENABLE         0x1
51 #define DLB2_PCI_PRI_ALLOC_REQ           0xC
52 #define DLB2_PCI_PRI_CTRL                0x4
53 #define DLB2_PCI_MSIX_FLAGS              0x2
54 #define DLB2_PCI_MSIX_FLAGS_ENABLE       0x8000
55 #define DLB2_PCI_MSIX_FLAGS_MASKALL      0x4000
56 #define DLB2_PCI_ERR_ROOT_STATUS         0x30
57 #define DLB2_PCI_ERR_COR_STATUS          0x10
58 #define DLB2_PCI_ERR_UNCOR_STATUS        0x4
59 #define DLB2_PCI_COMMAND_INTX_DISABLE    0x400
60 #define DLB2_PCI_ACS_CAP                 0x4
61 #define DLB2_PCI_ACS_CTRL                0x6
62 #define DLB2_PCI_ACS_SV                  0x1
63 #define DLB2_PCI_ACS_RR                  0x4
64 #define DLB2_PCI_ACS_CR                  0x8
65 #define DLB2_PCI_ACS_UF                  0x10
66 #define DLB2_PCI_ACS_EC                  0x20
67 
dlb2_pci_find_capability(struct rte_pci_device * pdev,uint32_t id)68 static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
69 {
70 	uint8_t pos;
71 	int ret;
72 	uint16_t hdr;
73 
74 	ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER);
75 	pos &= 0xFC;
76 
77 	if (ret != 1)
78 		return -1;
79 
80 	while (pos > 0x3F) {
81 		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
82 		if (ret != 2)
83 			return -1;
84 
85 		if (DLB2_PCI_CAP_ID(hdr) == id)
86 			return pos;
87 
88 		if (DLB2_PCI_CAP_ID(hdr) == 0xFF)
89 			return -1;
90 
91 		pos = DLB2_PCI_CAP_NEXT(hdr);
92 	}
93 
94 	return -1;
95 }
96 
97 static int
dlb2_pf_init_driver_state(struct dlb2_dev * dlb2_dev)98 dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
99 {
100 	rte_spinlock_init(&dlb2_dev->resource_mutex);
101 
102 	return 0;
103 }
104 
dlb2_pf_enable_pm(struct dlb2_dev * dlb2_dev)105 static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
106 {
107 	int version;
108 	version = DLB2_HW_DEVICE_FROM_PCI_ID(dlb2_dev->pdev);
109 
110 	dlb2_clr_pmcsr_disable(&dlb2_dev->hw, version);
111 }
112 
113 #define DLB2_READY_RETRY_LIMIT 1000
dlb2_pf_wait_for_device_ready(struct dlb2_dev * dlb2_dev,int dlb_version)114 static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev,
115 					 int dlb_version)
116 {
117 	u32 retries = 0;
118 
119 	/* Allow at least 1s for the device to become active after power-on */
120 	for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
121 		u32 idle_val;
122 		u32 idle_dlb_func_idle;
123 		u32 pm_st_val;
124 		u32 pm_st_pmsm;
125 		u32 addr;
126 
127 		addr = DLB2_CM_CFG_PM_STATUS(dlb_version);
128 		pm_st_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
129 		addr = DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS(dlb_version);
130 		idle_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
131 		idle_dlb_func_idle = idle_val &
132 			DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS_DLB_FUNC_IDLE;
133 		pm_st_pmsm = pm_st_val & DLB2_CM_CFG_PM_STATUS_PMSM;
134 		if (pm_st_pmsm && idle_dlb_func_idle)
135 			break;
136 
137 		rte_delay_ms(1);
138 	};
139 
140 	if (retries == DLB2_READY_RETRY_LIMIT) {
141 		DLB2_LOG_ERR("[%s()] wait for device ready timed out\n",
142 		       __func__);
143 		return -1;
144 	}
145 
146 	return 0;
147 }
148 
149 struct dlb2_dev *
dlb2_probe(struct rte_pci_device * pdev)150 dlb2_probe(struct rte_pci_device *pdev)
151 {
152 	struct dlb2_dev *dlb2_dev;
153 	int ret = 0;
154 	int dlb_version = 0;
155 
156 	DLB2_INFO(dlb2_dev, "probe\n");
157 
158 	dlb2_dev = rte_malloc("DLB2_PF", sizeof(struct dlb2_dev),
159 			      RTE_CACHE_LINE_SIZE);
160 
161 	if (dlb2_dev == NULL) {
162 		ret = -ENOMEM;
163 		goto dlb2_dev_malloc_fail;
164 	}
165 
166 	dlb_version = DLB2_HW_DEVICE_FROM_PCI_ID(pdev);
167 
168 	/* PCI Bus driver has already mapped bar space into process.
169 	 * Save off our IO register and FUNC addresses.
170 	 */
171 
172 	/* BAR 0 */
173 	if (pdev->mem_resource[0].addr == NULL) {
174 		DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
175 		ret = -EINVAL;
176 		goto pci_mmap_bad_addr;
177 	}
178 	dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
179 	dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
180 
181 	DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
182 		  (void *)dlb2_dev->hw.func_kva,
183 		  (void *)dlb2_dev->hw.func_phys_addr,
184 		  (void *)(pdev->mem_resource[0].len));
185 
186 	/* BAR 2 */
187 	if (pdev->mem_resource[2].addr == NULL) {
188 		DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
189 		ret = -EINVAL;
190 		goto pci_mmap_bad_addr;
191 	}
192 	dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
193 	dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
194 
195 	DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
196 		  (void *)dlb2_dev->hw.csr_kva,
197 		  (void *)dlb2_dev->hw.csr_phys_addr,
198 		  (void *)(pdev->mem_resource[2].len));
199 
200 	dlb2_dev->pdev = pdev;
201 
202 	/* PM enable must be done before any other MMIO accesses, and this
203 	 * setting is persistent across device reset.
204 	 */
205 	dlb2_pf_enable_pm(dlb2_dev);
206 
207 	ret = dlb2_pf_wait_for_device_ready(dlb2_dev, dlb_version);
208 	if (ret)
209 		goto wait_for_device_ready_fail;
210 
211 	ret = dlb2_pf_reset(dlb2_dev);
212 	if (ret)
213 		goto dlb2_reset_fail;
214 
215 	ret = dlb2_pf_init_driver_state(dlb2_dev);
216 	if (ret)
217 		goto init_driver_state_fail;
218 
219 	ret = dlb2_resource_init(&dlb2_dev->hw, dlb_version);
220 	if (ret)
221 		goto resource_init_fail;
222 
223 	return dlb2_dev;
224 
225 resource_init_fail:
226 	dlb2_resource_free(&dlb2_dev->hw);
227 init_driver_state_fail:
228 dlb2_reset_fail:
229 pci_mmap_bad_addr:
230 wait_for_device_ready_fail:
231 	rte_free(dlb2_dev);
232 dlb2_dev_malloc_fail:
233 	rte_errno = ret;
234 	return NULL;
235 }
236 
237 int
dlb2_pf_reset(struct dlb2_dev * dlb2_dev)238 dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
239 {
240 	int ret = 0;
241 	int i = 0;
242 	uint32_t dword[16];
243 	uint16_t cmd;
244 	off_t off;
245 
246 	uint16_t dev_ctl_word;
247 	uint16_t dev_ctl2_word;
248 	uint16_t lnk_word;
249 	uint16_t lnk_word2;
250 	uint16_t slt_word;
251 	uint16_t slt_word2;
252 	uint16_t rt_ctl_word;
253 	uint32_t pri_reqs_dword;
254 	uint16_t pri_ctrl_word;
255 
256 	int pcie_cap_offset;
257 	int pri_cap_offset;
258 	int msix_cap_offset;
259 	int err_cap_offset;
260 	int acs_cap_offset;
261 	int wait_count;
262 
263 	uint16_t devsta_busy_word;
264 	uint16_t devctl_word;
265 
266 	struct rte_pci_device *pdev = dlb2_dev->pdev;
267 
268 	/* Save PCI config state */
269 
270 	for (i = 0; i < 16; i++) {
271 		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
272 			return ret;
273 	}
274 
275 	pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP);
276 
277 	if (pcie_cap_offset < 0) {
278 		DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n",
279 		       __func__);
280 		return pcie_cap_offset;
281 	}
282 
283 	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
284 	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
285 		dev_ctl_word = 0;
286 
287 	off = pcie_cap_offset + DLB2_PCI_LNKCTL;
288 	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
289 		lnk_word = 0;
290 
291 	off = pcie_cap_offset + DLB2_PCI_SLTCTL;
292 	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
293 		slt_word = 0;
294 
295 	off = pcie_cap_offset + DLB2_PCI_RTCTL;
296 	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
297 		rt_ctl_word = 0;
298 
299 	off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
300 	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
301 		dev_ctl2_word = 0;
302 
303 	off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
304 	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
305 		lnk_word2 = 0;
306 
307 	off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
308 	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
309 		slt_word2 = 0;
310 
311 	off = DLB2_PCI_EXT_CAP_ID_PRI;
312 	pri_cap_offset = rte_pci_find_ext_capability(pdev, off);
313 
314 	if (pri_cap_offset >= 0) {
315 		off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
316 		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
317 			pri_reqs_dword = 0;
318 	}
319 
320 	/* clear the PCI command register before issuing the FLR */
321 
322 	off = DLB2_PCI_CMD;
323 	cmd = 0;
324 	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
325 		DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
326 		       __func__);
327 		return ret;
328 	}
329 
330 	/* issue the FLR */
331 	for (wait_count = 0; wait_count < 4; wait_count++) {
332 		int sleep_time;
333 
334 		off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA;
335 		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
336 		if (ret != 2) {
337 			DLB2_LOG_ERR("[%s()] failed to read the pci device status\n",
338 			       __func__);
339 			return ret;
340 		}
341 
342 		if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND))
343 			break;
344 
345 		sleep_time = (1 << (wait_count)) * 100;
346 		rte_delay_ms(sleep_time);
347 	}
348 
349 	if (wait_count == 4) {
350 		DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
351 		       __func__);
352 		return -1;
353 	}
354 
355 	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
356 	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
357 	if (ret != 2) {
358 		DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
359 		       __func__);
360 		return ret;
361 	}
362 
363 	devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR;
364 
365 	ret = rte_pci_write_config(pdev, &devctl_word, 2, off);
366 	if (ret != 2) {
367 		DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n",
368 		       __func__);
369 		return ret;
370 	}
371 
372 	rte_delay_ms(100);
373 
374 	/* Restore PCI config state */
375 
376 	if (pcie_cap_offset >= 0) {
377 		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
378 		ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
379 		if (ret != 2) {
380 			DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
381 				__func__, (int)off);
382 			return ret;
383 		}
384 
385 		off = pcie_cap_offset + DLB2_PCI_LNKCTL;
386 		ret = rte_pci_write_config(pdev, &lnk_word, 2, off);
387 		if (ret != 2) {
388 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
389 				__func__, (int)off);
390 			return ret;
391 		}
392 
393 		off = pcie_cap_offset + DLB2_PCI_SLTCTL;
394 		ret = rte_pci_write_config(pdev, &slt_word, 2, off);
395 		if (ret != 2) {
396 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
397 				__func__, (int)off);
398 			return ret;
399 		}
400 
401 		off = pcie_cap_offset + DLB2_PCI_RTCTL;
402 		ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off);
403 		if (ret != 2) {
404 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
405 				__func__, (int)off);
406 			return ret;
407 		}
408 
409 		off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
410 		ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off);
411 		if (ret != 2) {
412 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
413 				__func__, (int)off);
414 			return ret;
415 		}
416 
417 		off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
418 		ret = rte_pci_write_config(pdev, &lnk_word2, 2, off);
419 		if (ret != 2) {
420 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
421 				__func__, (int)off);
422 			return ret;
423 		}
424 
425 		off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
426 		ret = rte_pci_write_config(pdev, &slt_word2, 2, off);
427 		if (ret != 2) {
428 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
429 				__func__, (int)off);
430 			return ret;
431 		}
432 	}
433 
434 	if (pri_cap_offset >= 0) {
435 		pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE;
436 
437 		off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
438 		ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off);
439 		if (ret != 4) {
440 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
441 				__func__, (int)off);
442 			return ret;
443 		}
444 
445 		off = pri_cap_offset + DLB2_PCI_PRI_CTRL;
446 		ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off);
447 		if (ret != 2) {
448 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
449 				__func__, (int)off);
450 			return ret;
451 		}
452 	}
453 
454 	off = RTE_PCI_EXT_CAP_ID_ERR;
455 	err_cap_offset = rte_pci_find_ext_capability(pdev, off);
456 
457 	if (err_cap_offset >= 0) {
458 		uint32_t tmp;
459 
460 		off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS;
461 		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
462 			tmp = 0;
463 
464 		ret = rte_pci_write_config(pdev, &tmp, 4, off);
465 		if (ret != 4) {
466 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
467 				__func__, (int)off);
468 			return ret;
469 		}
470 
471 		off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS;
472 		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
473 			tmp = 0;
474 
475 		ret = rte_pci_write_config(pdev, &tmp, 4, off);
476 		if (ret != 4) {
477 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
478 				__func__, (int)off);
479 			return ret;
480 		}
481 
482 		off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS;
483 		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
484 			tmp = 0;
485 
486 		ret = rte_pci_write_config(pdev, &tmp, 4, off);
487 		if (ret != 4) {
488 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
489 				__func__, (int)off);
490 			return ret;
491 		}
492 	}
493 
494 	for (i = 16; i > 0; i--) {
495 		off = (i - 1) * 4;
496 		ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off);
497 		if (ret != 4) {
498 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
499 				__func__, (int)off);
500 			return ret;
501 		}
502 	}
503 
504 	off = DLB2_PCI_CMD;
505 	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
506 		cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE;
507 		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
508 			DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
509 			       __func__);
510 			return ret;
511 		}
512 	}
513 
514 	msix_cap_offset = dlb2_pci_find_capability(pdev,
515 						   DLB2_PCI_CAP_ID_MSIX);
516 	if (msix_cap_offset >= 0) {
517 		off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
518 		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
519 			cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE;
520 			cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL;
521 			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
522 				DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
523 				       __func__);
524 				return ret;
525 			}
526 		}
527 
528 		off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
529 		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
530 			cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL;
531 			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
532 				DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
533 				       __func__);
534 				return ret;
535 			}
536 		}
537 	}
538 
539 	off = DLB2_PCI_EXT_CAP_ID_ACS;
540 	acs_cap_offset = rte_pci_find_ext_capability(pdev, off);
541 
542 	if (acs_cap_offset >= 0) {
543 		uint16_t acs_cap, acs_ctrl, acs_mask;
544 		off = acs_cap_offset + DLB2_PCI_ACS_CAP;
545 		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
546 			acs_cap = 0;
547 
548 		off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
549 		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
550 			acs_ctrl = 0;
551 
552 		acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR;
553 		acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF);
554 		acs_ctrl |= (acs_cap & acs_mask);
555 
556 		ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
557 		if (ret != 2) {
558 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
559 				__func__, (int)off);
560 			return ret;
561 		}
562 
563 		off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
564 		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
565 			acs_ctrl = 0;
566 
567 		acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR;
568 		acs_mask |= DLB2_PCI_ACS_EC;
569 		acs_ctrl &= ~acs_mask;
570 
571 		off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
572 		ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
573 		if (ret != 2) {
574 			DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
575 				__func__, (int)off);
576 			return ret;
577 		}
578 	}
579 
580 	return 0;
581 }
582 
583 int
dlb2_pf_create_sched_domain(struct dlb2_hw * hw,struct dlb2_create_sched_domain_args * args,struct dlb2_cmd_response * resp)584 dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
585 			    struct dlb2_create_sched_domain_args *args,
586 			    struct dlb2_cmd_response *resp)
587 {
588 	return dlb2_hw_create_sched_domain(hw, args, resp, NOT_VF_REQ,
589 					   PF_ID_ZERO);
590 }
591 
592 int
dlb2_pf_reset_domain(struct dlb2_hw * hw,u32 id)593 dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)
594 {
595 	return dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);
596 }
597 
598 int
dlb2_pf_create_ldb_queue(struct dlb2_hw * hw,u32 id,struct dlb2_create_ldb_queue_args * args,struct dlb2_cmd_response * resp)599 dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,
600 			 u32 id,
601 			 struct dlb2_create_ldb_queue_args *args,
602 			 struct dlb2_cmd_response *resp)
603 {
604 	return dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,
605 					PF_ID_ZERO);
606 }
607 
608 int
dlb2_pf_create_ldb_port(struct dlb2_hw * hw,u32 id,struct dlb2_create_ldb_port_args * args,uintptr_t cq_dma_base,struct dlb2_cmd_response * resp)609 dlb2_pf_create_ldb_port(struct dlb2_hw *hw,
610 			u32 id,
611 			struct dlb2_create_ldb_port_args *args,
612 			uintptr_t cq_dma_base,
613 			struct dlb2_cmd_response *resp)
614 {
615 	return dlb2_hw_create_ldb_port(hw, id, args,
616 				       cq_dma_base,
617 				       resp,
618 				       NOT_VF_REQ,
619 				       PF_ID_ZERO);
620 }
621 
622 int
dlb2_pf_create_dir_port(struct dlb2_hw * hw,u32 id,struct dlb2_create_dir_port_args * args,uintptr_t cq_dma_base,struct dlb2_cmd_response * resp)623 dlb2_pf_create_dir_port(struct dlb2_hw *hw,
624 			u32 id,
625 			struct dlb2_create_dir_port_args *args,
626 			uintptr_t cq_dma_base,
627 			struct dlb2_cmd_response *resp)
628 {
629 	return dlb2_hw_create_dir_port(hw, id, args,
630 				       cq_dma_base,
631 				       resp,
632 				       NOT_VF_REQ,
633 				       PF_ID_ZERO);
634 }
635 
636 int
dlb2_pf_create_dir_queue(struct dlb2_hw * hw,u32 id,struct dlb2_create_dir_queue_args * args,struct dlb2_cmd_response * resp)637 dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
638 			 u32 id,
639 			 struct dlb2_create_dir_queue_args *args,
640 			 struct dlb2_cmd_response *resp)
641 {
642 	return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
643 					PF_ID_ZERO);
644 }
645 
646 int
dlb2_pf_start_domain(struct dlb2_hw * hw,u32 id,struct dlb2_start_domain_args * args,struct dlb2_cmd_response * resp)647 dlb2_pf_start_domain(struct dlb2_hw *hw,
648 		     u32 id,
649 		     struct dlb2_start_domain_args *args,
650 		     struct dlb2_cmd_response *resp)
651 {
652 	return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,
653 				    PF_ID_ZERO);
654 }
655