/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"

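/*
 * Ops table shared by all TIM adapters on this device: control-path
 * callbacks are filled in by otx2_tim_caps_get() and fast-path arm/cancel
 * callbacks are selected per ring by tim_set_fp_ops().
 */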
static struct rte_event_timer_adapter_ops otx2_tim_ops;

static inline int
tim_get_msix_offsets(void)
{
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int i, rc;

	/* Get TIM MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);
	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc < 0)
		return rc;

	for (i = 0; i < dev->nb_rings; i++)
		dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];

	return rc;
}

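/*
 * Pick the fast-path arm handlers matching this ring's configuration. The
 * tables below are generated from TIM_ARM_FASTPATH_MODES, one specialized
 * implementation per combination of the ring's flags.
 */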
static void
tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
{
	uint8_t prod_flag = !tim_ring->prod_type_sp;

	/* [STATS] [MOD/AND] [DFB/FB] [SP/MP] */
	const rte_event_timer_arm_burst_t arm_burst[2][2][2][2] = {
#define FP(_name, _f4, _f3, _f2, _f1, flags) \
		[_f4][_f3][_f2][_f1] = otx2_tim_arm_burst_ ## _name,
TIM_ARM_FASTPATH_MODES
#undef FP
	};

	const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags) \
		[_f3][_f2][_f1] = otx2_tim_arm_tmo_tick_burst_ ## _name,
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
	};

	otx2_tim_ops.arm_burst =
		arm_burst[tim_ring->enable_stats][tim_ring->optimized]
			[tim_ring->ena_dfb][prod_flag];
	otx2_tim_ops.arm_tmo_tick_burst =
		arm_tmo_burst[tim_ring->enable_stats][tim_ring->optimized]
			[tim_ring->ena_dfb];
	otx2_tim_ops.cancel_burst = otx2_tim_timer_cancel_burst;
}

static void
otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
		       struct rte_event_timer_adapter_info *adptr_info)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = tim_ring->max_tout;
	adptr_info->min_resolution_ns = tim_ring->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
		   sizeof(struct rte_event_timer_adapter_conf));
}

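/*
 * Nudge the bucket count to a power of two so the fast path can use a mask
 * (AND) instead of a modulo for bucket selection. Try the next higher and
 * next lower powers of two, discard a candidate if its tick would fall
 * below the minimum hardware timeout or its bucket count would exceed the
 * hardware limit, and otherwise keep the candidate closer to the request,
 * recomputing the tick from it.
 */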
static void
tim_optimize_bkt_param(struct otx2_tim_ring *tim_ring)
{
	uint64_t tck_nsec;
	uint32_t hbkts;
	uint32_t lbkts;

	hbkts = rte_align32pow2(tim_ring->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (hbkts - 1), 10);

	if (tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
				 tim_ring->tenns_clk_freq) ||
	    hbkts > OTX2_TIM_MAX_BUCKETS)
		hbkts = 0;

	lbkts = rte_align32prevpow2(tim_ring->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (lbkts - 1), 10);

	if (tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
				 tim_ring->tenns_clk_freq) ||
	    lbkts > OTX2_TIM_MAX_BUCKETS)
		lbkts = 0;

	if (!hbkts && !lbkts)
		return;

	if (!hbkts) {
		tim_ring->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		tim_ring->nb_bkts = hbkts;
		goto end;
	}

	/* Both candidates fit; pick the power of two closer to the request. */
	tim_ring->nb_bkts = (hbkts - tim_ring->nb_bkts) <
		(tim_ring->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	tim_ring->optimized = true;
	tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout /
						(tim_ring->nb_bkts - 1)), 10);
	otx2_tim_dbg("Optimized configured values");
	otx2_tim_dbg("Nb_bkts  : %" PRIu32 "", tim_ring->nb_bkts);
	otx2_tim_dbg("Tck_nsec : %" PRIu64 "", tim_ring->tck_nsec);
}

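/*
 * Create the pool backing this ring's timer chunks. With the NPA (hardware
 * mempool) the pool's aura is programmed into the ring and the don't-free
 * bit stays clear (ena_dfb = 0), so hardware can recycle chunks; with the
 * NPA disabled a plain software mempool is used and the ring is configured
 * with the don't-free bit set (ena_dfb = 1).
 */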
static int
tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
		     struct rte_event_timer_adapter_conf *rcfg)
{
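	/*
	 * Let up to roughly two thirds of the chunks sit in mempool caches,
	 * split evenly across lcores and capped at RTE_MEMPOOL_CACHE_MAX_SIZE
	 * below.
	 */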
	unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
	unsigned int mp_flags = 0;
	char pool_name[25];
	int rc;

	cache_sz /= rte_lcore_count();
	/* Create chunk pool. */
	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		otx2_tim_dbg("Using single producer mode");
		tim_ring->prod_type_sp = true;
	}

	snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
		 tim_ring->ring_id);

	if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
		cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;

	if (!tim_ring->disable_npa) {
		tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
				tim_ring->nb_chunks, tim_ring->chunk_sz,
				cache_sz, 0, rte_socket_id(), mp_flags);

		if (tim_ring->chunk_pool == NULL) {
			otx2_err("Unable to create chunkpool.");
			return -ENOMEM;
		}

		rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
						rte_mbuf_platform_mempool_ops(),
						NULL);
		if (rc < 0) {
			otx2_err("Unable to set chunkpool ops");
			goto free;
		}

		rc = rte_mempool_populate_default(tim_ring->chunk_pool);
		if (rc < 0) {
			otx2_err("Unable to populate chunkpool.");
			goto free;
		}
		tim_ring->aura = npa_lf_aura_handle_to_aura(
				tim_ring->chunk_pool->pool_id);
		tim_ring->ena_dfb = 0;
	} else {
		tim_ring->chunk_pool = rte_mempool_create(pool_name,
				tim_ring->nb_chunks, tim_ring->chunk_sz,
				cache_sz, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(),
				mp_flags);
		if (tim_ring->chunk_pool == NULL) {
			otx2_err("Unable to create chunkpool.");
			return -ENOMEM;
		}
		tim_ring->ena_dfb = 1;
	}

	return 0;

free:
	rte_mempool_free(tim_ring->chunk_pool);
	return rc;
}

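/* Translate TIM_AF_* mailbox error codes into log messages. */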
static void
tim_err_desc(int rc)
{
	switch (rc) {
	case TIM_AF_NO_RINGS_LEFT:
		otx2_err("Unable to allocate new TIM ring.");
		break;
	case TIM_AF_INVALID_NPA_PF_FUNC:
		otx2_err("Invalid NPA pf func.");
		break;
	case TIM_AF_INVALID_SSO_PF_FUNC:
		otx2_err("Invalid SSO pf func.");
		break;
	case TIM_AF_RING_STILL_RUNNING:
		otx2_tim_dbg("Ring busy.");
		break;
	case TIM_AF_LF_INVALID:
		otx2_err("Invalid ring id.");
		break;
	case TIM_AF_CSIZE_NOT_ALIGNED:
		otx2_err("Chunk size specified needs to be multiple of 16.");
		break;
	case TIM_AF_CSIZE_TOO_SMALL:
		otx2_err("Chunk size too small.");
		break;
	case TIM_AF_CSIZE_TOO_BIG:
		otx2_err("Chunk size too big.");
		break;
	case TIM_AF_INTERVAL_TOO_SMALL:
		otx2_err("Bucket traversal interval too small.");
		break;
	case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
		otx2_err("Invalid big endian value.");
		break;
	case TIM_AF_INVALID_CLOCK_SOURCE:
		otx2_err("Invalid clock source specified.");
		break;
	case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
		otx2_err("GPIO clock source not enabled.");
		break;
	case TIM_AF_INVALID_BSIZE:
		otx2_err("Invalid bucket size.");
		break;
	case TIM_AF_INVALID_ENABLE_PERIODIC:
		otx2_err("Invalid enable periodic value.");
		break;
	case TIM_AF_INVALID_ENABLE_DONTFREE:
		otx2_err("Invalid don't-free value.");
		break;
	case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
		otx2_err("Don't free bit not set when periodic is enabled.");
		break;
	case TIM_AF_RING_ALREADY_DISABLED:
		otx2_err("Ring already stopped.");
		break;
	default:
		otx2_err("Unknown error.");
	}
}

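/*
 * Adapter init callback: allocate a TIM LF through the AF mailbox, derive
 * the ring geometry (tick, bucket count, chunk budget) from the adapter
 * config, allocate buckets and the chunk pool, then configure the ring and
 * program its BAR2 registers.
 */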
static int
otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
{
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct otx2_tim_ring *tim_ring;
	struct tim_config_req *cfg_req;
	struct tim_ring_req *free_req;
	struct tim_lf_alloc_req *req;
	struct tim_lf_alloc_rsp *rsp;
	int i, rc;

	if (dev == NULL)
		return -ENODEV;

	if (adptr->data->id >= dev->nb_rings)
		return -ENODEV;

	req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->sso_pf_func = otx2_sso_pf_func_get();
	req->ring = adptr->data->id;

	rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc < 0) {
		tim_err_desc(rc);
		return -ENODEV;
	}

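	/*
	 * The hardware tick is programmed in units of the 10 ns reference
	 * clock, so round the requested resolution up to a multiple of 10 ns.
	 * If it is still below the minimum traversal interval, bump it up
	 * when the application allows adjustment, otherwise fail.
	 */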
	if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
		      rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
		if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) {
			rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
					rsp->tenns_clk);
		} else {
			rc = -ERANGE;
			goto rng_mem_err;
		}
	}

	tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
	if (tim_ring == NULL) {
		rc = -ENOMEM;
		goto rng_mem_err;
	}

	adptr->data->adapter_priv = tim_ring;

	tim_ring->tenns_clk_freq = rsp->tenns_clk;
	tim_ring->clk_src = (int)rcfg->clk_src;
	tim_ring->ring_id = adptr->data->id;
	tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	tim_ring->max_tout = rcfg->max_tmo_ns;
	tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
	tim_ring->chunk_sz = dev->chunk_sz;
	tim_ring->nb_timers = rcfg->nb_timers;
	tim_ring->disable_npa = dev->disable_npa;
	tim_ring->enable_stats = dev->enable_stats;

	/* Apply any per-ring overrides given via the tim_ring_ctl devarg. */
	for (i = 0; i < dev->ring_ctl_cnt; i++) {
		struct otx2_tim_ctl *ring_ctl = &dev->ring_ctl_data[i];

		if (ring_ctl->ring == tim_ring->ring_id) {
			tim_ring->chunk_sz = ring_ctl->chunk_slots ?
				((uint32_t)(ring_ctl->chunk_slots + 1) *
				 OTX2_TIM_CHUNK_ALIGNMENT) : tim_ring->chunk_sz;
			tim_ring->enable_stats = ring_ctl->enable_stats;
			tim_ring->disable_npa = ring_ctl->disable_npa;
		}
	}

	tim_ring->nb_chunks = tim_ring->nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
							tim_ring->chunk_sz);
	tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);

	/* Try to optimize the bucket parameters. */
	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) {
		if (rte_is_power_of_2(tim_ring->nb_bkts))
			tim_ring->optimized = true;
		else
			tim_optimize_bkt_param(tim_ring);
	}

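	/*
	 * Scale the chunk budget: in don't-free mode (NPA disabled) chunks
	 * stay tied to their bucket, so each bucket needs a full allotment;
	 * with the NPA recycling chunks, one extra chunk per bucket of head
	 * room is enough.
	 */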
	if (tim_ring->disable_npa)
		tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
	else
		tim_ring->nb_chunks = tim_ring->nb_chunks + tim_ring->nb_bkts;

	/* Create buckets. */
	tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
				    sizeof(struct otx2_tim_bkt),
				    RTE_CACHE_LINE_SIZE);
	if (tim_ring->bkt == NULL) {
		rc = -ENOMEM;
		goto bkt_mem_err;
	}

	rc = tim_chnk_pool_create(tim_ring, rcfg);
	if (rc < 0)
		goto chnk_mem_err;

	cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);

	cfg_req->ring = tim_ring->ring_id;
	cfg_req->bigendian = false;
	cfg_req->clocksource = tim_ring->clk_src;
	cfg_req->enableperiodic = false;
	cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
	cfg_req->bucketsize = tim_ring->nb_bkts;
	cfg_req->chunksize = tim_ring->chunk_sz;
	cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
				      tim_ring->tenns_clk_freq);

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		goto chnk_mem_err;
	}

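	/* This ring's LF registers live in BAR2 at offset
	 * (block address << 20 | LF slot << 12).
	 */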
	tim_ring->base = dev->bar2 +
		(RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);

	rc = tim_register_irq(tim_ring->ring_id);
	if (rc < 0)
		goto chnk_mem_err;

	otx2_write64((uint64_t)tim_ring->bkt,
		     tim_ring->base + TIM_LF_RING_BASE);
	otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

	/* Set fastpath ops. */
	tim_set_fp_ops(tim_ring);

	/* Update SSO xae count. */
	sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)tim_ring,
			 RTE_EVENT_TYPE_TIMER);
	sso_xae_reconfigure(dev->event_dev);

	otx2_tim_dbg("Total memory used %"PRIu64"MB\n",
			(uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz)
			+ (tim_ring->nb_bkts * sizeof(struct otx2_tim_bkt))) /
			BIT_ULL(20)));

	return rc;

chnk_mem_err:
	rte_free(tim_ring->bkt);
bkt_mem_err:
	rte_free(tim_ring);
rng_mem_err:
	free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
	free_req->ring = adptr->data->id;
	otx2_mbox_process(dev->mbox);
	return rc;
}

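/*
 * Align the software view of the ring start time with the hardware bucket
 * pointer: repeatedly read the current bucket from TIM_LF_RING_REL (bits
 * 63:44), rebase ring_start_cyc so the bucket computed from the TSC matches
 * it, and count how often the two disagree.
 */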
static void
otx2_tim_calibrate_start_tsc(struct otx2_tim_ring *tim_ring)
{
#define OTX2_TIM_CALIB_ITER	1E6
	uint32_t real_bkt, bucket;
	int icount, ecount = 0;
	uint64_t bkt_cyc;

	for (icount = 0; icount < OTX2_TIM_CALIB_ITER; icount++) {
		real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
		bkt_cyc = rte_rdtsc();
		bucket = (bkt_cyc - tim_ring->ring_start_cyc) /
							tim_ring->tck_int;
		bucket = bucket % (tim_ring->nb_bkts);
		tim_ring->ring_start_cyc = bkt_cyc - (real_bkt *
							tim_ring->tck_int);
		if (bucket != real_bkt)
			ecount++;
	}
	tim_ring->last_updt_cyc = bkt_cyc;
	otx2_tim_dbg("Bucket mispredict %3.2f distance %d\n",
		     100 - (((double)(icount - ecount) / (double)icount) * 100),
		     bucket - real_bkt);
}

static int
otx2_tim_ring_start(const struct rte_event_timer_adapter *adptr)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct tim_enable_rsp *rsp;
	struct tim_ring_req *req;
	int rc;

	if (dev == NULL)
		return -ENODEV;

	req = otx2_mbox_alloc_msg_tim_enable_ring(dev->mbox);
	req->ring = tim_ring->ring_id;

	rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc < 0) {
		tim_err_desc(rc);
		goto fail;
	}
#ifdef RTE_ARM_EAL_RDTSC_USE_PMU
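	/*
	 * When rte_rdtsc() is backed by the ARM PMU cycle counter it ticks in
	 * a different domain than the hardware start timestamp, which comes
	 * from the 10 ns generic timer (cntvct_el0). Sample both counters and
	 * translate the start timestamp into the PMU domain.
	 */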
	uint64_t tenns_stmp, tenns_diff;
	uint64_t pmu_stmp;

	pmu_stmp = rte_rdtsc();
	asm volatile("mrs %0, cntvct_el0" : "=r" (tenns_stmp));

	tenns_diff = tenns_stmp - rsp->timestarted;
	pmu_stmp = pmu_stmp - (NSEC2TICK(tenns_diff * 10, rte_get_timer_hz()));
	tim_ring->ring_start_cyc = pmu_stmp;
#else
	tim_ring->ring_start_cyc = rsp->timestarted;
#endif
	tim_ring->tck_int = NSEC2TICK(tim_ring->tck_nsec, rte_get_timer_hz());
	tim_ring->tot_int = tim_ring->tck_int * tim_ring->nb_bkts;
	tim_ring->fast_div = rte_reciprocal_value_u64(tim_ring->tck_int);

	otx2_tim_calibrate_start_tsc(tim_ring);

fail:
	return rc;
}

static int
otx2_tim_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct tim_ring_req *req;
	int rc;

	if (dev == NULL)
		return -ENODEV;

	req = otx2_mbox_alloc_msg_tim_disable_ring(dev->mbox);
	req->ring = tim_ring->ring_id;

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		rc = -EBUSY;
	}

	return rc;
}

static int
otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct tim_ring_req *req;
	int rc;

	if (dev == NULL)
		return -ENODEV;

	tim_unregister_irq(tim_ring->ring_id);

	req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
	req->ring = tim_ring->ring_id;

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		return -EBUSY;
	}

	rte_free(tim_ring->bkt);
	rte_mempool_free(tim_ring->chunk_pool);
	rte_free(adptr->data->adapter_priv);

	return 0;
}

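/*
 * Adapter ticks are derived from the TSC delta since ring start divided by
 * the tick interval, using the precomputed reciprocal to avoid a 64-bit
 * division on the stats path.
 */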
static int
otx2_tim_stats_get(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer_adapter_stats *stats)
{
	struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;

	stats->evtim_exp_count = __atomic_load_n(&tim_ring->arm_cnt,
						 __ATOMIC_RELAXED);
	stats->ev_enq_count = stats->evtim_exp_count;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&tim_ring->fast_div);
	return 0;
}

static int
otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;

	__atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
	return 0;
}

int
otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
		  uint32_t *caps,
		  const struct rte_event_timer_adapter_ops **ops)
{
	struct otx2_tim_evdev *dev = tim_priv_get();

	RTE_SET_USED(flags);

	if (dev == NULL)
		return -ENODEV;

	otx2_tim_ops.init = otx2_tim_ring_create;
	otx2_tim_ops.uninit = otx2_tim_ring_free;
	otx2_tim_ops.start = otx2_tim_ring_start;
	otx2_tim_ops.stop = otx2_tim_ring_stop;
	otx2_tim_ops.get_info = otx2_tim_ring_info_get;

	if (dev->enable_stats) {
		otx2_tim_ops.stats_get   = otx2_tim_stats_get;
		otx2_tim_ops.stats_reset = otx2_tim_stats_reset;
	}

	/* Store evdev pointer for later use. */
	dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &otx2_tim_ops;

	return 0;
}

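/*
 * Devargs recognized by the TIM PMD, e.g. (whitelist syntax as in the
 * eventdev guide; the BDF is only an example):
 *   -w 0002:0e:00.0,tim_chnk_slots=1023,tim_rings_lmt=2
 *   -w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
 */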
#define OTX2_TIM_DISABLE_NPA	"tim_disable_npa"
#define OTX2_TIM_CHNK_SLOTS	"tim_chnk_slots"
#define OTX2_TIM_STATS_ENA	"tim_stats_ena"
#define OTX2_TIM_RINGS_LMT	"tim_rings_lmt"
#define OTX2_TIM_RING_CTL	"tim_ring_ctl"

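/*
 * Parse one "ring-chunk_sz-disable_npa-enable_stats" tuple. The fields of
 * struct otx2_tim_ctl are filled by walking them as consecutive uint16_t
 * values, so the parse relies on the struct keeping exactly that layout.
 */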
static void
tim_parse_ring_param(char *value, void *opaque)
{
	struct otx2_tim_evdev *dev = opaque;
	struct otx2_tim_ctl ring_ctl = {0};
	struct otx2_tim_ctl *old_ptr;
	uint16_t *val;
	char *tok;

	if (!strlen(value))
		return;

	val = (uint16_t *)&ring_ctl;
	tok = strtok(value, "-");

	/* Stop before walking past the last field of ring_ctl. */
	while (tok != NULL && val != (&ring_ctl.enable_stats + 1)) {
		*val = atoi(tok);
		tok = strtok(NULL, "-");
		val++;
	}

	if (tok != NULL || val != (&ring_ctl.enable_stats + 1)) {
		otx2_err("Invalid ring param, expected [ring-chunk_sz-disable_npa-enable_stats]");
		return;
	}

	dev->ring_ctl_cnt++;
	old_ptr = dev->ring_ctl_data;
	dev->ring_ctl_data = rte_realloc(dev->ring_ctl_data,
					 sizeof(struct otx2_tim_ctl) *
					 dev->ring_ctl_cnt, 0);
	if (dev->ring_ctl_data == NULL) {
		dev->ring_ctl_data = old_ptr;
		dev->ring_ctl_cnt--;
		return;
	}

	dev->ring_ctl_data[dev->ring_ctl_cnt - 1] = ring_ctl;
}

static void
tim_parse_ring_ctl_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	if (s == NULL)
		return;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start && start < end) {
			*end = 0;
			tim_parse_ring_param(start + 1, opaque);
			start = end;
			s = end;
		}
		s++;
	}

	free(f);
}

static int
tim_parse_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format is [ring-chunk_sz-disable_npa-enable_stats]; '-' is
	 * used as the separator since ',' is not allowed in kvargs values.
	 * A value of 0 selects the default.
	 */
	tim_parse_ring_ctl_list(value, opaque);

	return 0;
}

static void
tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
			   &parse_kvargs_flag, &dev->disable_npa);
	rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
			   &parse_kvargs_value, &dev->chunk_slots);
	rte_kvargs_process(kvlist, OTX2_TIM_STATS_ENA, &parse_kvargs_flag,
			   &dev->enable_stats);
	rte_kvargs_process(kvlist, OTX2_TIM_RINGS_LMT, &parse_kvargs_value,
			   &dev->min_ring_cnt);
	rte_kvargs_process(kvlist, OTX2_TIM_RING_CTL,
			   &tim_parse_kvargs_dict, dev);

	rte_kvargs_free(kvlist);
}

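/*
 * Probe-time initialization: reserve the shared memzone that holds the TIM
 * device state, parse devargs, query the AF for the number of free TIM LFs,
 * attach them and fetch their MSI-X offsets.
 */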
void
otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
{
	struct rsrc_attach_req *atch_req;
	struct rsrc_detach_req *dtch_req;
	struct free_rsrcs_rsp *rsrc_cnt;
	const struct rte_memzone *mz;
	struct otx2_tim_evdev *dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
				 sizeof(struct otx2_tim_evdev),
				 rte_socket_id(), 0);
	if (mz == NULL) {
		otx2_tim_dbg("Unable to allocate memory for TIM Event device");
		return;
	}

	dev = mz->addr;
	dev->pci_dev = pci_dev;
	dev->mbox = cmn_dev->mbox;
	dev->bar2 = cmn_dev->bar2;

	tim_parse_devargs(pci_dev->device.devargs, dev);

	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count.");
		goto mz_free;
	}

	dev->nb_rings = dev->min_ring_cnt ?
		RTE_MIN(dev->min_ring_cnt, rsrc_cnt->tim) : rsrc_cnt->tim;

	if (!dev->nb_rings) {
		otx2_tim_dbg("No TIM Logical functions provisioned.");
		goto mz_free;
	}

	atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
	atch_req->modify = true;
	atch_req->timlfs = dev->nb_rings;

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		otx2_err("Unable to attach TIM rings.");
		goto mz_free;
	}

	rc = tim_get_msix_offsets();
	if (rc < 0) {
		otx2_err("Unable to get MSIX offsets for TIM.");
		goto detach;
	}

	if (dev->chunk_slots &&
	    dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
	    dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
		dev->chunk_sz = (dev->chunk_slots + 1) *
			OTX2_TIM_CHUNK_ALIGNMENT;
	} else {
		dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
	}

	return;

detach:
	dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
	dtch_req->partial = true;
	dtch_req->timlfs = true;

	otx2_mbox_process(dev->mbox);
mz_free:
	rte_memzone_free(mz);
}

void
otx2_tim_fini(void)
{
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct rsrc_detach_req *dtch_req;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (dev == NULL)
		return;

	dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
	dtch_req->partial = true;
	dtch_req->timlfs = true;

	otx2_mbox_process(dev->mbox);
	rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));
}