1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <assert.h>
10 #include <unistd.h>
11 #include <string.h>
12
13 #include <rte_malloc.h>
14 #include <rte_errno.h>
15
16 #include "base/dlb2_resource.h"
17 #include "base/dlb2_osdep.h"
18 #include "base/dlb2_regs.h"
19 #include "dlb2_main.h"
20 #include "../dlb2_user.h"
21 #include "../dlb2_priv.h"
22 #include "../dlb2_iface.h"
23 #include "../dlb2_inline_fns.h"
24
25 #define PF_ID_ZERO 0 /* PF ONLY! */
26 #define NO_OWNER_VF 0 /* PF ONLY! */
27 #define NOT_VF_REQ false /* PF ONLY! */
28
29 #define DLB2_PCI_CAP_POINTER 0x34
30 #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
31 #define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
32
33 #define DLB2_PCI_LNKCTL 16
34 #define DLB2_PCI_SLTCTL 24
35 #define DLB2_PCI_RTCTL 28
36 #define DLB2_PCI_EXP_DEVCTL2 40
37 #define DLB2_PCI_LNKCTL2 48
38 #define DLB2_PCI_SLTCTL2 56
39 #define DLB2_PCI_CMD 4
40 #define DLB2_PCI_EXP_DEVSTA 10
41 #define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
42 #define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000
43
44 #define DLB2_PCI_CAP_ID_EXP 0x10
45 #define DLB2_PCI_CAP_ID_MSIX 0x11
46 #define DLB2_PCI_EXT_CAP_ID_PRI 0x13
47 #define DLB2_PCI_EXT_CAP_ID_ACS 0xD
48
49 #define DLB2_PCI_PRI_CTRL_ENABLE 0x1
50 #define DLB2_PCI_PRI_ALLOC_REQ 0xC
51 #define DLB2_PCI_PRI_CTRL 0x4
52 #define DLB2_PCI_MSIX_FLAGS 0x2
53 #define DLB2_PCI_MSIX_FLAGS_ENABLE 0x8000
54 #define DLB2_PCI_MSIX_FLAGS_MASKALL 0x4000
55 #define DLB2_PCI_ERR_ROOT_STATUS 0x30
56 #define DLB2_PCI_ERR_COR_STATUS 0x10
57 #define DLB2_PCI_ERR_UNCOR_STATUS 0x4
58 #define DLB2_PCI_COMMAND_INTX_DISABLE 0x400
59 #define DLB2_PCI_ACS_CAP 0x4
60 #define DLB2_PCI_ACS_CTRL 0x6
61 #define DLB2_PCI_ACS_SV 0x1
62 #define DLB2_PCI_ACS_RR 0x4
63 #define DLB2_PCI_ACS_CR 0x8
64 #define DLB2_PCI_ACS_UF 0x10
65 #define DLB2_PCI_ACS_EC 0x20
66
/* Walk the legacy PCI capability list of @pdev and return the config-space
 * offset of the capability whose ID matches @id, or -1 if the capability is
 * not present or a config-space read fails.
 */
static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
{
	uint8_t pos;
	int ret;
	uint16_t hdr;

	ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER);
	if (ret != 1)
		return -1;

	/* Mask only after the read is known good: on failure pos is
	 * indeterminate and operating on it is undefined behavior.
	 */
	pos &= 0xFC;

	/* The standard config header occupies the first 0x40 bytes, so any
	 * valid capability pointer is > 0x3F; 0 terminates the list.
	 */
	while (pos > 0x3F) {
		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
		if (ret != 2)
			return -1;

		if (DLB2_PCI_CAP_ID(hdr) == id)
			return pos;

		if (DLB2_PCI_CAP_ID(hdr) == 0xFF)
			return -1;

		pos = DLB2_PCI_CAP_NEXT(hdr);
	}

	return -1;
}
95
96 static int
dlb2_pf_init_driver_state(struct dlb2_dev * dlb2_dev)97 dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
98 {
99 rte_spinlock_init(&dlb2_dev->resource_mutex);
100
101 return 0;
102 }
103
dlb2_pf_enable_pm(struct dlb2_dev * dlb2_dev)104 static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
105 {
106 dlb2_clr_pmcsr_disable(&dlb2_dev->hw);
107 }
108
109 #define DLB2_READY_RETRY_LIMIT 1000
dlb2_pf_wait_for_device_ready(struct dlb2_dev * dlb2_dev)110 static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev)
111 {
112 u32 retries = 0;
113
114 /* Allow at least 1s for the device to become active after power-on */
115 for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
116 union dlb2_cfg_mstr_cfg_diagnostic_idle_status idle;
117 union dlb2_cfg_mstr_cfg_pm_status pm_st;
118 u32 addr;
119
120 addr = DLB2_CFG_MSTR_CFG_PM_STATUS;
121 pm_st.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
122 addr = DLB2_CFG_MSTR_CFG_DIAGNOSTIC_IDLE_STATUS;
123 idle.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
124 if (pm_st.field.pmsm == 1 && idle.field.dlb_func_idle == 1)
125 break;
126
127 rte_delay_ms(1);
128 };
129
130 if (retries == DLB2_READY_RETRY_LIMIT) {
131 DLB2_LOG_ERR("[%s()] wait for device ready timed out\n",
132 __func__);
133 return -1;
134 }
135
136 return 0;
137 }
138
139 struct dlb2_dev *
dlb2_probe(struct rte_pci_device * pdev)140 dlb2_probe(struct rte_pci_device *pdev)
141 {
142 struct dlb2_dev *dlb2_dev;
143 int ret = 0;
144
145 DLB2_INFO(dlb2_dev, "probe\n");
146
147 dlb2_dev = rte_malloc("DLB2_PF", sizeof(struct dlb2_dev),
148 RTE_CACHE_LINE_SIZE);
149
150 if (dlb2_dev == NULL) {
151 ret = -ENOMEM;
152 goto dlb2_dev_malloc_fail;
153 }
154
155 /* PCI Bus driver has already mapped bar space into process.
156 * Save off our IO register and FUNC addresses.
157 */
158
159 /* BAR 0 */
160 if (pdev->mem_resource[0].addr == NULL) {
161 DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
162 ret = -EINVAL;
163 goto pci_mmap_bad_addr;
164 }
165 dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
166 dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
167
168 DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
169 (void *)dlb2_dev->hw.func_kva,
170 (void *)dlb2_dev->hw.func_phys_addr,
171 (void *)(pdev->mem_resource[0].len));
172
173 /* BAR 2 */
174 if (pdev->mem_resource[2].addr == NULL) {
175 DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
176 ret = -EINVAL;
177 goto pci_mmap_bad_addr;
178 }
179 dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
180 dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
181
182 DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
183 (void *)dlb2_dev->hw.csr_kva,
184 (void *)dlb2_dev->hw.csr_phys_addr,
185 (void *)(pdev->mem_resource[2].len));
186
187 dlb2_dev->pdev = pdev;
188
189 /* PM enable must be done before any other MMIO accesses, and this
190 * setting is persistent across device reset.
191 */
192 dlb2_pf_enable_pm(dlb2_dev);
193
194 ret = dlb2_pf_wait_for_device_ready(dlb2_dev);
195 if (ret)
196 goto wait_for_device_ready_fail;
197
198 ret = dlb2_pf_reset(dlb2_dev);
199 if (ret)
200 goto dlb2_reset_fail;
201
202 ret = dlb2_pf_init_driver_state(dlb2_dev);
203 if (ret)
204 goto init_driver_state_fail;
205
206 ret = dlb2_resource_init(&dlb2_dev->hw);
207 if (ret)
208 goto resource_init_fail;
209
210 return dlb2_dev;
211
212 resource_init_fail:
213 dlb2_resource_free(&dlb2_dev->hw);
214 init_driver_state_fail:
215 dlb2_reset_fail:
216 pci_mmap_bad_addr:
217 wait_for_device_ready_fail:
218 rte_free(dlb2_dev);
219 dlb2_dev_malloc_fail:
220 rte_errno = ret;
221 return NULL;
222 }
223
224 int
dlb2_pf_reset(struct dlb2_dev * dlb2_dev)225 dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
226 {
227 int ret = 0;
228 int i = 0;
229 uint32_t dword[16];
230 uint16_t cmd;
231 off_t off;
232
233 uint16_t dev_ctl_word;
234 uint16_t dev_ctl2_word;
235 uint16_t lnk_word;
236 uint16_t lnk_word2;
237 uint16_t slt_word;
238 uint16_t slt_word2;
239 uint16_t rt_ctl_word;
240 uint32_t pri_reqs_dword;
241 uint16_t pri_ctrl_word;
242
243 int pcie_cap_offset;
244 int pri_cap_offset;
245 int msix_cap_offset;
246 int err_cap_offset;
247 int acs_cap_offset;
248 int wait_count;
249
250 uint16_t devsta_busy_word;
251 uint16_t devctl_word;
252
253 struct rte_pci_device *pdev = dlb2_dev->pdev;
254
255 /* Save PCI config state */
256
257 for (i = 0; i < 16; i++) {
258 if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
259 return ret;
260 }
261
262 pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP);
263
264 if (pcie_cap_offset < 0) {
265 DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n",
266 __func__);
267 return pcie_cap_offset;
268 }
269
270 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
271 if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
272 dev_ctl_word = 0;
273
274 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
275 if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
276 lnk_word = 0;
277
278 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
279 if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
280 slt_word = 0;
281
282 off = pcie_cap_offset + DLB2_PCI_RTCTL;
283 if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
284 rt_ctl_word = 0;
285
286 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
287 if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
288 dev_ctl2_word = 0;
289
290 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
291 if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
292 lnk_word2 = 0;
293
294 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
295 if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
296 slt_word2 = 0;
297
298 off = DLB2_PCI_EXT_CAP_ID_PRI;
299 pri_cap_offset = rte_pci_find_ext_capability(pdev, off);
300
301 if (pri_cap_offset >= 0) {
302 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
303 if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
304 pri_reqs_dword = 0;
305 }
306
307 /* clear the PCI command register before issuing the FLR */
308
309 off = DLB2_PCI_CMD;
310 cmd = 0;
311 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
312 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
313 __func__);
314 return ret;
315 }
316
317 /* issue the FLR */
318 for (wait_count = 0; wait_count < 4; wait_count++) {
319 int sleep_time;
320
321 off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA;
322 ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
323 if (ret != 2) {
324 DLB2_LOG_ERR("[%s()] failed to read the pci device status\n",
325 __func__);
326 return ret;
327 }
328
329 if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND))
330 break;
331
332 sleep_time = (1 << (wait_count)) * 100;
333 rte_delay_ms(sleep_time);
334 }
335
336 if (wait_count == 4) {
337 DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
338 __func__);
339 return -1;
340 }
341
342 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
343 ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
344 if (ret != 2) {
345 DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
346 __func__);
347 return ret;
348 }
349
350 devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR;
351
352 ret = rte_pci_write_config(pdev, &devctl_word, 2, off);
353 if (ret != 2) {
354 DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n",
355 __func__);
356 return ret;
357 }
358
359 rte_delay_ms(100);
360
361 /* Restore PCI config state */
362
363 if (pcie_cap_offset >= 0) {
364 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
365 ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
366 if (ret != 2) {
367 DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
368 __func__, (int)off);
369 return ret;
370 }
371
372 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
373 ret = rte_pci_write_config(pdev, &lnk_word, 2, off);
374 if (ret != 2) {
375 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
376 __func__, (int)off);
377 return ret;
378 }
379
380 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
381 ret = rte_pci_write_config(pdev, &slt_word, 2, off);
382 if (ret != 2) {
383 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
384 __func__, (int)off);
385 return ret;
386 }
387
388 off = pcie_cap_offset + DLB2_PCI_RTCTL;
389 ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off);
390 if (ret != 2) {
391 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
392 __func__, (int)off);
393 return ret;
394 }
395
396 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
397 ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off);
398 if (ret != 2) {
399 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
400 __func__, (int)off);
401 return ret;
402 }
403
404 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
405 ret = rte_pci_write_config(pdev, &lnk_word2, 2, off);
406 if (ret != 2) {
407 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
408 __func__, (int)off);
409 return ret;
410 }
411
412 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
413 ret = rte_pci_write_config(pdev, &slt_word2, 2, off);
414 if (ret != 2) {
415 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
416 __func__, (int)off);
417 return ret;
418 }
419 }
420
421 if (pri_cap_offset >= 0) {
422 pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE;
423
424 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
425 ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off);
426 if (ret != 4) {
427 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
428 __func__, (int)off);
429 return ret;
430 }
431
432 off = pri_cap_offset + DLB2_PCI_PRI_CTRL;
433 ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off);
434 if (ret != 2) {
435 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
436 __func__, (int)off);
437 return ret;
438 }
439 }
440
441 off = RTE_PCI_EXT_CAP_ID_ERR;
442 err_cap_offset = rte_pci_find_ext_capability(pdev, off);
443
444 if (err_cap_offset >= 0) {
445 uint32_t tmp;
446
447 off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS;
448 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
449 tmp = 0;
450
451 ret = rte_pci_write_config(pdev, &tmp, 4, off);
452 if (ret != 4) {
453 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
454 __func__, (int)off);
455 return ret;
456 }
457
458 off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS;
459 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
460 tmp = 0;
461
462 ret = rte_pci_write_config(pdev, &tmp, 4, off);
463 if (ret != 4) {
464 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
465 __func__, (int)off);
466 return ret;
467 }
468
469 off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS;
470 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
471 tmp = 0;
472
473 ret = rte_pci_write_config(pdev, &tmp, 4, off);
474 if (ret != 4) {
475 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
476 __func__, (int)off);
477 return ret;
478 }
479 }
480
481 for (i = 16; i > 0; i--) {
482 off = (i - 1) * 4;
483 ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off);
484 if (ret != 4) {
485 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
486 __func__, (int)off);
487 return ret;
488 }
489 }
490
491 off = DLB2_PCI_CMD;
492 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
493 cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE;
494 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
495 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
496 __func__);
497 return ret;
498 }
499 }
500
501 msix_cap_offset = dlb2_pci_find_capability(pdev,
502 DLB2_PCI_CAP_ID_MSIX);
503 if (msix_cap_offset >= 0) {
504 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
505 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
506 cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE;
507 cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL;
508 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
509 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
510 __func__);
511 return ret;
512 }
513 }
514
515 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
516 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
517 cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL;
518 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
519 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
520 __func__);
521 return ret;
522 }
523 }
524 }
525
526 off = DLB2_PCI_EXT_CAP_ID_ACS;
527 acs_cap_offset = rte_pci_find_ext_capability(pdev, off);
528
529 if (acs_cap_offset >= 0) {
530 uint16_t acs_cap, acs_ctrl, acs_mask;
531 off = acs_cap_offset + DLB2_PCI_ACS_CAP;
532 if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
533 acs_cap = 0;
534
535 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
536 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
537 acs_ctrl = 0;
538
539 acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR;
540 acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF);
541 acs_ctrl |= (acs_cap & acs_mask);
542
543 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
544 if (ret != 2) {
545 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
546 __func__, (int)off);
547 return ret;
548 }
549
550 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
551 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
552 acs_ctrl = 0;
553
554 acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR;
555 acs_mask |= DLB2_PCI_ACS_EC;
556 acs_ctrl &= ~acs_mask;
557
558 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
559 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
560 if (ret != 2) {
561 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
562 __func__, (int)off);
563 return ret;
564 }
565 }
566
567 return 0;
568 }
569
570 int
dlb2_pf_create_sched_domain(struct dlb2_hw * hw,struct dlb2_create_sched_domain_args * args,struct dlb2_cmd_response * resp)571 dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
572 struct dlb2_create_sched_domain_args *args,
573 struct dlb2_cmd_response *resp)
574 {
575 return dlb2_hw_create_sched_domain(hw, args, resp, NOT_VF_REQ,
576 PF_ID_ZERO);
577 }
578
579 int
dlb2_pf_reset_domain(struct dlb2_hw * hw,u32 id)580 dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)
581 {
582 return dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);
583 }
584
585 int
dlb2_pf_create_ldb_queue(struct dlb2_hw * hw,u32 id,struct dlb2_create_ldb_queue_args * args,struct dlb2_cmd_response * resp)586 dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,
587 u32 id,
588 struct dlb2_create_ldb_queue_args *args,
589 struct dlb2_cmd_response *resp)
590 {
591 return dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,
592 PF_ID_ZERO);
593 }
594
595 int
dlb2_pf_create_ldb_port(struct dlb2_hw * hw,u32 id,struct dlb2_create_ldb_port_args * args,uintptr_t cq_dma_base,struct dlb2_cmd_response * resp)596 dlb2_pf_create_ldb_port(struct dlb2_hw *hw,
597 u32 id,
598 struct dlb2_create_ldb_port_args *args,
599 uintptr_t cq_dma_base,
600 struct dlb2_cmd_response *resp)
601 {
602 return dlb2_hw_create_ldb_port(hw, id, args,
603 cq_dma_base,
604 resp,
605 NOT_VF_REQ,
606 PF_ID_ZERO);
607 }
608
609 int
dlb2_pf_create_dir_port(struct dlb2_hw * hw,u32 id,struct dlb2_create_dir_port_args * args,uintptr_t cq_dma_base,struct dlb2_cmd_response * resp)610 dlb2_pf_create_dir_port(struct dlb2_hw *hw,
611 u32 id,
612 struct dlb2_create_dir_port_args *args,
613 uintptr_t cq_dma_base,
614 struct dlb2_cmd_response *resp)
615 {
616 return dlb2_hw_create_dir_port(hw, id, args,
617 cq_dma_base,
618 resp,
619 NOT_VF_REQ,
620 PF_ID_ZERO);
621 }
622
623 int
dlb2_pf_create_dir_queue(struct dlb2_hw * hw,u32 id,struct dlb2_create_dir_queue_args * args,struct dlb2_cmd_response * resp)624 dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
625 u32 id,
626 struct dlb2_create_dir_queue_args *args,
627 struct dlb2_cmd_response *resp)
628 {
629 return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
630 PF_ID_ZERO);
631 }
632
633 int
dlb2_pf_start_domain(struct dlb2_hw * hw,u32 id,struct dlb2_start_domain_args * args,struct dlb2_cmd_response * resp)634 dlb2_pf_start_domain(struct dlb2_hw *hw,
635 u32 id,
636 struct dlb2_start_domain_args *args,
637 struct dlb2_cmd_response *resp)
638 {
639 return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,
640 PF_ID_ZERO);
641 }
642