/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_MEMPOOL_H__
#define __OTX2_MEMPOOL_H__

#include <rte_bitmap.h>
#include <rte_bus_pci.h>
#include <rte_devargs.h>
#include <rte_mempool.h>

#include "otx2_common.h"
#include "otx2_mbox.h"

enum npa_lf_status {
	NPA_LF_ERR_PARAM            = -512,
	NPA_LF_ERR_ALLOC            = -513,
	NPA_LF_ERR_INVALID_BLOCK_SZ = -514,
	NPA_LF_ERR_AURA_ID_ALLOC    = -515,
	NPA_LF_ERR_AURA_POOL_INIT   = -516,
	NPA_LF_ERR_AURA_POOL_FINI   = -517,
	NPA_LF_ERR_BASE_INVALID     = -518,
};

struct otx2_npa_lf;
struct otx2_npa_qint {
	struct otx2_npa_lf *lf;
	uint8_t qintx;
};

struct npa_aura_lim {
	uint64_t ptr_start;
	uint64_t ptr_end;
};

struct otx2_npa_lf {
	uint16_t qints;
	uintptr_t base;
	uint8_t aura_sz;
	uint16_t pf_func;
	uint32_t nr_pools;
	void *npa_bmp_mem;
	void *npa_qint_mem;
	uint16_t npa_msixoff;
	struct otx2_mbox *mbox;
	uint32_t stack_pg_ptrs;
	uint32_t stack_pg_bytes;
	struct rte_bitmap *npa_bmp;
	struct npa_aura_lim *aura_lim;
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
};

#define AURA_ID_MASK		(BIT_ULL(16) - 1)

/*
 * Generate a 64-bit handle to keep the aura alloc and free operations
 * optimized:
 * values 0 .. AURA_ID_MASK store the aura_id,
 * values AURA_ID_MASK + 1 .. 2^64 - 1 store the LF base address.
 * This scheme is valid only when the OS provides an LF base address
 * aligned to AURA_ID_MASK + 1.
 */
static inline uint64_t
npa_lf_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
{
	uint64_t val;

	val = aura_id & AURA_ID_MASK;
	return (uint64_t)addr | val;
}

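/* Extract the aura id from an aura handle. */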
static inline uint64_t
npa_lf_aura_handle_to_aura(uint64_t aura_handle)
{
	return aura_handle & AURA_ID_MASK;
}

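/* Extract the LF base address from an aura handle. */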
static inline uintptr_t
npa_lf_aura_handle_to_base(uint64_t aura_handle)
{
	return (uintptr_t)(aura_handle & ~AURA_ID_MASK);
}

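/*
 * Allocate a pointer from the aura via an atomic read-modify-write to
 * NPA_LF_AURA_OP_ALLOCX(0); bit 63 of wdata carries the DROP flag.
 * Returns the value read back from the hardware.
 */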
static inline uint64_t
npa_lf_aura_op_alloc(uint64_t aura_handle, const int drop)
{
	uint64_t wdata = npa_lf_aura_handle_to_aura(aura_handle);

	if (drop)
		wdata |= BIT_ULL(63); /* DROP */

	return otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_AURA_OP_ALLOCX(0)));
}

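/*
 * Free a pointer (iova) back to the aura with a paired store to
 * NPA_LF_AURA_OP_FREE0; bit 63 of the register word carries the FABS
 * flag.
 */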
static inline void
npa_lf_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
{
	uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);

	if (fabs)
		reg |= BIT_ULL(63); /* FABS */

	otx2_store_pair(iova, reg,
		npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0);
}

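/*
 * Read the aura's count register. Returns the 36-bit count, or zero if
 * the hardware flagged an error (OP_ERR, bit 42).
 */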
static inline uint64_t
npa_lf_aura_op_cnt_get(uint64_t aura_handle)
{
	uint64_t wdata;
	uint64_t reg;

	wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;

	reg = otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_AURA_OP_CNT));

	if (reg & BIT_ULL(42) /* OP_ERR */)
		return 0;
	else
		return reg & 0xFFFFFFFFF;
}

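/*
 * Update the aura's count register with the low 36 bits of count; when
 * sign is set, the CNT_ADD bit (43) requests that the value be added to
 * the count instead of replacing it.
 */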
static inline void
npa_lf_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
{
	uint64_t reg = count & (BIT_ULL(36) - 1);

	if (sign)
		reg |= BIT_ULL(43); /* CNT_ADD */

	reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);

	otx2_write64(reg,
		npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_CNT);
}

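/*
 * Read the aura's limit register. Returns the 36-bit limit, or zero if
 * the hardware flagged an error (OP_ERR, bit 42).
 */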
static inline uint64_t
npa_lf_aura_op_limit_get(uint64_t aura_handle)
{
	uint64_t wdata;
	uint64_t reg;

	wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;

	reg = otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_AURA_OP_LIMIT));

	if (reg & BIT_ULL(42) /* OP_ERR */)
		return 0;
	else
		return reg & 0xFFFFFFFFF;
}

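/* Program the aura's limit register with the low 36 bits of limit. */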
static inline void
npa_lf_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
{
	uint64_t reg = limit & (BIT_ULL(36) - 1);

	reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);

	otx2_write64(reg,
		npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_LIMIT);
}

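/*
 * Read the number of pointers available in the aura's backing pool via
 * NPA_LF_POOL_OP_AVAILABLE. Returns the 36-bit count, or zero on
 * OP_ERR (bit 42).
 */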
static inline uint64_t
npa_lf_aura_op_available(uint64_t aura_handle)
{
	uint64_t wdata;
	uint64_t reg;

	wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;

	reg = otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(
			aura_handle) + NPA_LF_POOL_OP_AVAILABLE));

	if (reg & BIT_ULL(42) /* OP_ERR */)
		return 0;
	else
		return reg & 0xFFFFFFFFF;
}

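/*
 * Program the valid [start, end] iova range of the aura's backing pool.
 * The cached per-aura limits only ever widen, so repeated calls end up
 * covering every memory chunk that backs the pool.
 */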
static inline void
npa_lf_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
			 uint64_t end_iova)
{
	uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
	struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
	struct npa_aura_lim *lim = lf->aura_lim;

	lim[reg].ptr_start = RTE_MIN(lim[reg].ptr_start, start_iova);
	lim[reg].ptr_end = RTE_MAX(lim[reg].ptr_end, end_iova);

	otx2_store_pair(lim[reg].ptr_start, reg,
		npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_POOL_OP_PTR_START0);
	otx2_store_pair(lim[reg].ptr_end, reg,
		npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_POOL_OP_PTR_END0);
}

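/*
 * Illustrative alloc/free cycle built from the inline ops above. This
 * is a sketch only: in the driver proper, the aura handle comes from
 * the aura/pool setup path rather than being generated directly.
 *
 *	uint64_t handle = npa_lf_aura_handle_gen(aura_id, lf->base);
 *	uint64_t iova = npa_lf_aura_op_alloc(handle, 0);
 *
 *	if (iova != 0)
 *		npa_lf_aura_op_free(handle, 0, iova);
 */
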
/* NPA LF */
__rte_internal
int otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev);
__rte_internal
int otx2_npa_lf_fini(void);

/* IRQ */
int otx2_npa_register_irqs(struct otx2_npa_lf *lf);
void otx2_npa_unregister_irqs(struct otx2_npa_lf *lf);

/* Debug */
int otx2_mempool_ctx_dump(struct otx2_npa_lf *lf);

#endif /* __OTX2_MEMPOOL_H__ */