/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define npa_dump plt_dump
9
/* Pretty-print every field of an NPA pool hardware context, grouped by
 * the context word (W0..W8) each field belongs to. Output goes through
 * npa_dump (plt_dump). Field semantics are defined by the NPA hardware;
 * this routine only formats what is in the structure.
 */
static inline void
npa_pool_dump(__io struct npa_pool_s *pool)
{
	npa_dump("W0: Stack base\t\t0x%" PRIx64 "", pool->stack_base);
	npa_dump("W1: ena \t\t%d\nW1: nat_align \t\t%d\nW1: stack_caching \t%d",
		 pool->ena, pool->nat_align, pool->stack_caching);
	npa_dump("W1: stack_way_mask\t%d\nW1: buf_offset\t\t%d",
		 pool->stack_way_mask, pool->buf_offset);
	npa_dump("W1: buf_size \t\t%d", pool->buf_size);

	npa_dump("W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d",
		 pool->stack_max_pages, pool->stack_pages);

	/* Cast: op_pc is presumably a bit-field narrower than 64 bits —
	 * widened explicitly to match the PRIx64 conversion. */
	npa_dump("W3: op_pc \t\t0x%" PRIx64 "", (uint64_t)pool->op_pc);

	npa_dump("W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d",
		 pool->stack_offset, pool->shift, pool->avg_level);
	npa_dump("W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d",
		 pool->avg_con, pool->fc_ena, pool->fc_stype);
	npa_dump("W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d",
		 pool->fc_hyst_bits, pool->fc_up_crossing);
	npa_dump("W4: update_time\t\t%d\n", pool->update_time);

	npa_dump("W5: fc_addr\t\t0x%" PRIx64 "\n", pool->fc_addr);

	npa_dump("W6: ptr_start\t\t0x%" PRIx64 "\n", pool->ptr_start);

	npa_dump("W7: ptr_end\t\t0x%" PRIx64 "\n", pool->ptr_end);
	npa_dump("W8: err_int\t\t%d\nW8: err_int_ena\t\t%d", pool->err_int,
		 pool->err_int_ena);
	npa_dump("W8: thresh_int\t\t%d", pool->thresh_int);

	npa_dump("W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d",
		 pool->thresh_int_ena, pool->thresh_up);
	npa_dump("W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d",
		 pool->thresh_qint_idx, pool->err_qint_idx);
}
47
/* Pretty-print every field of an NPA aura hardware context, grouped by
 * the context word (W0..W6) each field belongs to. Mirrors
 * npa_pool_dump() for the aura side of the NPA context pair.
 */
static inline void
npa_aura_dump(__io struct npa_aura_s *aura)
{
	npa_dump("W0: Pool addr\t\t0x%" PRIx64 "\n", aura->pool_addr);

	npa_dump("W1: ena\t\t\t%d\nW1: pool caching\t%d\nW1: pool way mask\t%d",
		 aura->ena, aura->pool_caching, aura->pool_way_mask);
	npa_dump("W1: avg con\t\t%d\nW1: pool drop ena\t%d", aura->avg_con,
		 aura->pool_drop_ena);
	npa_dump("W1: aura drop ena\t%d", aura->aura_drop_ena);
	npa_dump("W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\nW1: aura shift\t\t%d",
		 aura->bp_ena, aura->aura_drop, aura->shift);
	npa_dump("W1: avg_level\t\t%d\n", aura->avg_level);

	/* count/limit/thresh casts: presumably bit-fields narrower than 64
	 * bits, widened to match the PRIx64 conversions. */
	npa_dump("W2: count\t\t%" PRIx64 "\nW2: nix0_bpid\t\t%d",
		 (uint64_t)aura->count, aura->nix0_bpid);
	npa_dump("W2: nix1_bpid\t\t%d", aura->nix1_bpid);

	npa_dump("W3: limit\t\t%" PRIx64 "\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		 (uint64_t)aura->limit, aura->bp, aura->fc_ena);
	npa_dump("W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d",
		 aura->fc_up_crossing, aura->fc_stype);

	npa_dump("W3: fc_hyst_bits\t%d", aura->fc_hyst_bits);

	npa_dump("W4: fc_addr\t\t0x%" PRIx64 "\n", aura->fc_addr);

	npa_dump("W5: pool_drop\t\t%d\nW5: update_time\t\t%d", aura->pool_drop,
		 aura->update_time);
	npa_dump("W5: err_int\t\t%d", aura->err_int);
	npa_dump("W5: err_int_ena\t\t%d\nW5: thresh_int\t\t%d",
		 aura->err_int_ena, aura->thresh_int);
	npa_dump("W5: thresh_int_ena\t%d", aura->thresh_int_ena);

	npa_dump("W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d",
		 aura->thresh_up, aura->thresh_qint_idx);
	npa_dump("W5: err_qint_idx\t%d", aura->err_qint_idx);

	npa_dump("W6: thresh\t\t%" PRIx64 "\n", (uint64_t)aura->thresh);
}
88
89 int
roc_npa_ctx_dump(void)90 roc_npa_ctx_dump(void)
91 {
92 struct npa_aq_enq_req *aq;
93 struct npa_aq_enq_rsp *rsp;
94 struct npa_lf *lf;
95 uint32_t q;
96 int rc = 0;
97
98 lf = idev_npa_obj_get();
99 if (lf == NULL)
100 return NPA_ERR_DEVICE_NOT_BOUNDED;
101
102 for (q = 0; q < lf->nr_pools; q++) {
103 /* Skip disabled POOL */
104 if (plt_bitmap_get(lf->npa_bmp, q))
105 continue;
106
107 aq = mbox_alloc_msg_npa_aq_enq(lf->mbox);
108 if (aq == NULL)
109 return -ENOSPC;
110 aq->aura_id = q;
111 aq->ctype = NPA_AQ_CTYPE_POOL;
112 aq->op = NPA_AQ_INSTOP_READ;
113
114 rc = mbox_process_msg(lf->mbox, (void *)&rsp);
115 if (rc) {
116 plt_err("Failed to get pool(%d) context", q);
117 return rc;
118 }
119 npa_dump("============== pool=%d ===============\n", q);
120 npa_pool_dump(&rsp->pool);
121 }
122
123 for (q = 0; q < lf->nr_pools; q++) {
124 /* Skip disabled AURA */
125 if (plt_bitmap_get(lf->npa_bmp, q))
126 continue;
127
128 aq = mbox_alloc_msg_npa_aq_enq(lf->mbox);
129 if (aq == NULL)
130 return -ENOSPC;
131 aq->aura_id = q;
132 aq->ctype = NPA_AQ_CTYPE_AURA;
133 aq->op = NPA_AQ_INSTOP_READ;
134
135 rc = mbox_process_msg(lf->mbox, (void *)&rsp);
136 if (rc) {
137 plt_err("Failed to get aura(%d) context", q);
138 return rc;
139 }
140 npa_dump("============== aura=%d ===============\n", q);
141 npa_aura_dump(&rsp->aura);
142 }
143
144 return rc;
145 }
146
147 int
roc_npa_dump(void)148 roc_npa_dump(void)
149 {
150 struct npa_lf *lf;
151 int aura_cnt = 0;
152 uint32_t i;
153
154 lf = idev_npa_obj_get();
155 if (lf == NULL)
156 return NPA_ERR_DEVICE_NOT_BOUNDED;
157
158 for (i = 0; i < lf->nr_pools; i++) {
159 if (plt_bitmap_get(lf->npa_bmp, i))
160 continue;
161 aura_cnt++;
162 }
163
164 npa_dump("npa@%p", lf);
165 npa_dump(" pf = %d", dev_get_pf(lf->pf_func));
166 npa_dump(" vf = %d", dev_get_vf(lf->pf_func));
167 npa_dump(" aura_cnt = %d", aura_cnt);
168 npa_dump(" \tpci_dev = %p", lf->pci_dev);
169 npa_dump(" \tnpa_bmp = %p", lf->npa_bmp);
170 npa_dump(" \tnpa_bmp_mem = %p", lf->npa_bmp_mem);
171 npa_dump(" \tnpa_qint_mem = %p", lf->npa_qint_mem);
172 npa_dump(" \tintr_handle = %p", lf->intr_handle);
173 npa_dump(" \tmbox = %p", lf->mbox);
174 npa_dump(" \tbase = 0x%" PRIx64 "", lf->base);
175 npa_dump(" \tstack_pg_ptrs = %d", lf->stack_pg_ptrs);
176 npa_dump(" \tstack_pg_bytes = %d", lf->stack_pg_bytes);
177 npa_dump(" \tnpa_msixoff = 0x%x", lf->npa_msixoff);
178 npa_dump(" \tnr_pools = %d", lf->nr_pools);
179 npa_dump(" \tpf_func = 0x%x", lf->pf_func);
180 npa_dump(" \taura_sz = %d", lf->aura_sz);
181 npa_dump(" \tqints = %d", lf->qints);
182
183 return 0;
184 }
185