/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

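/*
 * Fetch the MSIX offset table over the mailbox and cache the per-ring
 * TIM LF MSIX offsets in the private tim structure.
 */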
static int
tim_fill_msix(struct roc_tim *roc_tim, uint16_t nb_ring)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct tim *tim = roc_tim_to_tim_priv(roc_tim);
	struct dev *dev = &sso->dev;
	struct msix_offset_rsp *rsp;
	int i, rc;

	mbox_alloc_msg_msix_offset(dev->mbox);
	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc)
		return -EIO;

	for (i = 0; i < nb_ring; i++)
		tim->tim_msix_offsets[i] = rsp->timlf_msixoff[i];

	return 0;
}

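/* Log a human readable description of a TIM_AF_* mailbox error code. */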
static void
tim_err_desc(int rc)
{
	switch (rc) {
	case TIM_AF_NO_RINGS_LEFT:
		plt_err("Unable to allocate new TIM ring.");
		break;
	case TIM_AF_INVALID_NPA_PF_FUNC:
		plt_err("Invalid NPA pf func.");
		break;
	case TIM_AF_INVALID_SSO_PF_FUNC:
		plt_err("Invalid SSO pf func.");
		break;
	case TIM_AF_RING_STILL_RUNNING:
		plt_err("Ring busy.");
		break;
	case TIM_AF_LF_INVALID:
		plt_err("Invalid Ring id.");
		break;
	case TIM_AF_CSIZE_NOT_ALIGNED:
		plt_err("Chunk size specified needs to be multiple of 16.");
		break;
	case TIM_AF_CSIZE_TOO_SMALL:
		plt_err("Chunk size too small.");
		break;
	case TIM_AF_CSIZE_TOO_BIG:
		plt_err("Chunk size too big.");
		break;
	case TIM_AF_INTERVAL_TOO_SMALL:
		plt_err("Bucket traversal interval too small.");
		break;
	case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
		plt_err("Invalid Big endian value.");
		break;
	case TIM_AF_INVALID_CLOCK_SOURCE:
		plt_err("Invalid Clock source specified.");
		break;
	case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
		plt_err("GPIO clock source not enabled.");
		break;
	case TIM_AF_INVALID_BSIZE:
		plt_err("Invalid bucket size.");
		break;
	case TIM_AF_INVALID_ENABLE_PERIODIC:
72 plt_err("Invalid bucket size.");
73 break;
74 case TIM_AF_INVALID_ENABLE_DONTFREE:
75 plt_err("Invalid Don't free value.");
76 break;
77 case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
78 plt_err("Don't free bit not set when periodic is enabled.");
79 break;
80 case TIM_AF_RING_ALREADY_DISABLED:
81 plt_err("Ring already stopped");
82 break;
83 default:
84 plt_err("Unknown Error.");
85 }
86 }
87
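/*
 * Enable a TIM ring over the mailbox. On success, the ring start
 * timestamp and current bucket index are optionally returned through
 * @start_tsc and @cur_bkt. Returns 0 on success, -ENOSPC if a mailbox
 * message cannot be allocated and -EIO on a mailbox failure.
 */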
int
roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
		  uint32_t *cur_bkt)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct dev *dev = &sso->dev;
	struct tim_enable_rsp *rsp;
	struct tim_ring_req *req;
	int rc = -ENOSPC;

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_enable_ring(dev->mbox);
	if (req == NULL)
		goto fail;
	req->ring = ring_id;

	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc) {
		tim_err_desc(rc);
		rc = -EIO;
		goto fail;
	}

	if (cur_bkt)
		*cur_bkt = rsp->currentbucket;
	if (start_tsc)
		*start_tsc = rsp->timestarted;

fail:
	plt_spinlock_unlock(&sso->mbox_lock);
	return rc;
}

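/*
 * Disable a TIM ring over the mailbox. Returns 0 on success, -ENOSPC if
 * a mailbox message cannot be allocated and -EIO on a mailbox failure.
 */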
int
roc_tim_lf_disable(struct roc_tim *roc_tim, uint8_t ring_id)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct dev *dev = &sso->dev;
	struct tim_ring_req *req;
	int rc = -ENOSPC;

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_disable_ring(dev->mbox);
	if (req == NULL)
		goto fail;
	req->ring = ring_id;

	rc = mbox_process(dev->mbox);
	if (rc) {
		tim_err_desc(rc);
		rc = -EIO;
	}

fail:
	plt_spinlock_unlock(&sso->mbox_lock);
	return rc;
}

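/*
 * Return the BAR2 base address of the given ring's TIM LF register
 * region, derived from the TIM block address and the ring id.
 */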
uintptr_t
roc_tim_lf_base_get(struct roc_tim *roc_tim, uint8_t ring_id)
{
	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;

	return dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
}

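/*
 * Configure a TIM ring: clock source, periodic mode, don't-free-buffer
 * mode, bucket and chunk sizes, and the bucket traversal interval in
 * clock cycles and nanoseconds. Returns 0 on success, -ENOSPC if a
 * mailbox message cannot be allocated and -EIO on a mailbox failure.
 */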
int
roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
		  enum roc_tim_clk_src clk_src, uint8_t ena_periodic,
		  uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz,
		  uint32_t interval, uint64_t intervalns, uint64_t clockfreq)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct dev *dev = &sso->dev;
	struct tim_config_req *req;
	int rc = -ENOSPC;

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_config_ring(dev->mbox);
	if (req == NULL)
		goto fail;
	req->ring = ring_id;
	req->bigendian = false;
	req->bucketsize = bucket_sz;
	req->chunksize = chunk_sz;
	req->clocksource = clk_src;
	req->enableperiodic = ena_periodic;
	req->enabledontfreebuffer = ena_dfb;
	req->interval = interval;
	req->intervalns = intervalns;
	req->clockfreq = clockfreq;
	req->gpioedge = TIM_GPIO_LTOH_TRANS;

	rc = mbox_process(dev->mbox);
	if (rc) {
		tim_err_desc(rc);
		rc = -EIO;
	}

fail:
	plt_spinlock_unlock(&sso->mbox_lock);
	return rc;
}

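/*
 * Query the minimum supported bucket traversal interval for the given
 * clock source and frequency. On success the interval is returned in
 * nanoseconds through @intervalns and in clock cycles through @interval.
 */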
int
roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src,
		    uint64_t clockfreq, uint64_t *intervalns,
		    uint64_t *interval)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct dev *dev = &sso->dev;
	struct tim_intvl_req *req;
	struct tim_intvl_rsp *rsp;
	int rc = -ENOSPC;

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_get_min_intvl(dev->mbox);
	if (req == NULL)
		goto fail;

	req->clockfreq = clockfreq;
	req->clocksource = clk_src;
	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc) {
		tim_err_desc(rc);
		rc = -EIO;
		goto fail;
	}

	*intervalns = rsp->intvl_ns;
	*interval = rsp->intvl_cyc;

fail:
	plt_spinlock_unlock(&sso->mbox_lock);
	return rc;
}

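/*
 * Allocate a TIM LF for the given ring id and register its interrupt
 * handler. On success, the clock value reported by the AF (tenns_clk)
 * is optionally returned through @clk. If IRQ registration fails, the
 * freshly allocated LF is freed again before returning the error.
 */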
int
roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct tim *tim = roc_tim_to_tim_priv(roc_tim);
	struct tim_ring_req *free_req;
	struct tim_lf_alloc_req *req;
	struct tim_lf_alloc_rsp *rsp;
	struct dev *dev = &sso->dev;
	int rc = -ENOSPC;

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_lf_alloc(dev->mbox);
	if (req == NULL)
		goto fail;
	req->npa_pf_func = idev_npa_pffunc_get();
	req->sso_pf_func = idev_sso_pffunc_get();
	req->ring = ring_id;

	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc) {
		tim_err_desc(rc);
		rc = -EIO;
		goto fail;
	}

	if (clk)
		*clk = rsp->tenns_clk;

	rc = tim_register_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
				   tim->tim_msix_offsets[ring_id]);
256 if (rc < 0) {
257 plt_tim_dbg("Failed to register Ring[%d] IRQ", ring_id);
258 free_req = mbox_alloc_msg_tim_lf_free(dev->mbox);
259 if (free_req == NULL) {
260 rc = -ENOSPC;
261 goto fail;
262 }
263 free_req->ring = ring_id;
264 rc = mbox_process(dev->mbox);
265 if (rc)
266 rc = -EIO;
267 }
268
269 fail:
270 plt_spinlock_unlock(&sso->mbox_lock);
271 return rc;
272 }
273
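/*
 * Unregister the ring's interrupt handler and free the TIM LF over the
 * mailbox. Mailbox failures are logged via tim_err_desc().
 */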
int
roc_tim_lf_free(struct roc_tim *roc_tim, uint8_t ring_id)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct tim *tim = roc_tim_to_tim_priv(roc_tim);
	struct dev *dev = &sso->dev;
	struct tim_ring_req *req;
	int rc = -ENOSPC;

	tim_unregister_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
				tim->tim_msix_offsets[ring_id]);

	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_tim_lf_free(dev->mbox);
	if (req == NULL)
		goto fail;
	req->ring = ring_id;

	rc = mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		rc = -EIO;
	}

fail:
	plt_spinlock_unlock(&sso->mbox_lock);
	return rc;
}

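/*
 * Attach TIM LFs to this device and cache their MSIX offsets. If
 * roc_tim->nb_lfs is zero, all free TIM LFs reported by the AF are
 * attached; otherwise exactly nb_lfs are requested and the call bails
 * out if fewer are available. Returns the number of LFs attached, 0 on
 * failure, or TIM_ERR_PARAM for invalid arguments.
 *
 * The call order suggested by this API is roughly: roc_tim_init() ->
 * roc_tim_lf_alloc() -> roc_tim_lf_config() -> roc_tim_lf_enable() ...
 * roc_tim_lf_disable() -> roc_tim_lf_free() -> roc_tim_fini().
 */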
int
roc_tim_init(struct roc_tim *roc_tim)
{
	struct rsrc_attach_req *attach_req;
	struct rsrc_detach_req *detach_req;
	struct free_rsrcs_rsp *free_rsrc;
	struct sso *sso;
	uint16_t nb_lfs;
	struct dev *dev;
	int rc;

	if (roc_tim == NULL || roc_tim->roc_sso == NULL)
		return TIM_ERR_PARAM;

	sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	dev = &sso->dev;
	PLT_STATIC_ASSERT(sizeof(struct tim) <= TIM_MEM_SZ);
	nb_lfs = roc_tim->nb_lfs;
	plt_spinlock_lock(&sso->mbox_lock);
	mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = mbox_process_msg(dev->mbox, (void **)&free_rsrc);
	if (rc) {
		plt_err("Unable to get free rsrc count.");
		nb_lfs = 0;
		goto fail;
	}

	if (nb_lfs && (free_rsrc->tim < nb_lfs)) {
		plt_tim_dbg("Requested LFs : %d Available LFs : %d", nb_lfs,
			    free_rsrc->tim);
		nb_lfs = 0;
		goto fail;
	}

	attach_req = mbox_alloc_msg_attach_resources(dev->mbox);
	if (attach_req == NULL) {
		nb_lfs = 0;
		goto fail;
	}
	attach_req->modify = true;
	attach_req->timlfs = nb_lfs ? nb_lfs : free_rsrc->tim;
	nb_lfs = attach_req->timlfs;

	rc = mbox_process(dev->mbox);
	if (rc) {
		plt_err("Unable to attach TIM LFs.");
		nb_lfs = 0;
		goto fail;
	}

	rc = tim_fill_msix(roc_tim, nb_lfs);
	if (rc < 0) {
355 plt_err("Unable to get TIM MSIX vectors");
356
357 detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
358 if (detach_req == NULL) {
359 nb_lfs = 0;
360 goto fail;
361 }
362 detach_req->partial = true;
363 detach_req->timlfs = true;
364 mbox_process(dev->mbox);
365 nb_lfs = 0;
366 }
367
368 fail:
369 plt_spinlock_unlock(&sso->mbox_lock);
370 return nb_lfs;
371 }
372
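/* Detach the TIM LFs that were attached by roc_tim_init(). */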
void
roc_tim_fini(struct roc_tim *roc_tim)
{
	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
	struct rsrc_detach_req *detach_req;
	struct dev *dev = &sso->dev;

	plt_spinlock_lock(&sso->mbox_lock);
	detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
	PLT_ASSERT(detach_req);
	detach_req->partial = true;
	detach_req->timlfs = true;

	mbox_process(dev->mbox);
	plt_spinlock_unlock(&sso->mbox_lock);
}