/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "cnxk_eventdev.h"
#include "cnxk_tim_evdev.h"

static struct event_timer_adapter_ops cnxk_tim_ops;

static int
cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
			  struct rte_event_timer_adapter_conf *rcfg)
{
	unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
	unsigned int mp_flags = 0;
	char pool_name[25];
	int rc;

	cache_sz /= rte_lcore_count();
	/* Create chunk pool. */
	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
		plt_tim_dbg("Using single producer mode");
		tim_ring->prod_type_sp = true;
	}

	snprintf(pool_name, sizeof(pool_name), "cnxk_tim_chunk_pool%d",
		 tim_ring->ring_id);

	if (cache_sz > CNXK_TIM_MAX_POOL_CACHE_SZ)
		cache_sz = CNXK_TIM_MAX_POOL_CACHE_SZ;
	cache_sz = cache_sz != 0 ? cache_sz : 2;
	tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
	if (!tim_ring->disable_npa) {
		tim_ring->chunk_pool = rte_mempool_create_empty(
			pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
			cache_sz, 0, rte_socket_id(), mp_flags);

		if (tim_ring->chunk_pool == NULL) {
			plt_err("Unable to create chunkpool.");
			return -ENOMEM;
		}

		rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
						rte_mbuf_platform_mempool_ops(),
						NULL);
		if (rc < 0) {
			plt_err("Unable to set chunkpool ops");
			goto free;
		}

		rc = rte_mempool_populate_default(tim_ring->chunk_pool);
		if (rc < 0) {
			plt_err("Unable to populate chunkpool.");
			goto free;
		}
		tim_ring->aura = roc_npa_aura_handle_to_aura(
			tim_ring->chunk_pool->pool_id);
		tim_ring->ena_dfb = tim_ring->ena_periodic ? 1 : 0;
	} else {
		tim_ring->chunk_pool = rte_mempool_create(
			pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
			cache_sz, 0, NULL, NULL, NULL, NULL, rte_socket_id(),
			mp_flags);
		if (tim_ring->chunk_pool == NULL) {
			plt_err("Unable to create chunkpool.");
			return -ENOMEM;
		}
		tim_ring->ena_dfb = 1;
	}

	return 0;

free:
	rte_mempool_free(tim_ring->chunk_pool);
	return rc;
}

static void
cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
{
	uint8_t prod_flag = !tim_ring->prod_type_sp;

	/* [STATS] [DFB/FB] [SP/MP] */
	const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags)                                        \
	[_f3][_f2][_f1] = cnxk_tim_arm_burst_##_name,
		TIM_ARM_FASTPATH_MODES
#undef FP
	};

	const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2] = {
#define FP(_name, _f2, _f1, flags)                                             \
	[_f2][_f1] = cnxk_tim_arm_tmo_tick_burst_##_name,
		TIM_ARM_TMO_FASTPATH_MODES
#undef FP
	};

	cnxk_tim_ops.arm_burst =
		arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag];
	cnxk_tim_ops.arm_tmo_tick_burst =
		arm_tmo_burst[tim_ring->enable_stats][tim_ring->ena_dfb];
	cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst;
}

static void
cnxk_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
		       struct rte_event_timer_adapter_info *adptr_info)
{
	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = tim_ring->max_tout;
	adptr_info->min_resolution_ns = tim_ring->ena_periodic ?
						tim_ring->max_tout :
						tim_ring->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
		   sizeof(struct rte_event_timer_adapter_conf));
}
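
/* Helpers for tim_adjust_resolution() below: populate_sample() steps the
 * requested tick count up (mov_fwd = true) or down, keeping only candidates
 * whose rounded nanosecond value is an exact multiple of the (integer
 * truncated) clock period, and records each candidate's rounding error
 * (diff) and distance from the request (dst). sort_multi_array() then orders
 * the candidates by rounding error, swapping all four arrays in lockstep so
 * each candidate's tck/ns/dst entries stay associated.
 */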
static inline void
sort_multi_array(double ref_arr[], uint64_t arr1[], uint64_t arr2[],
		 uint64_t arr3[], uint8_t sz)
{
	int x;

	for (x = 0; x < sz - 1; x++) {
		if (ref_arr[x] > ref_arr[x + 1]) {
			PLT_SWAP(ref_arr[x], ref_arr[x + 1]);
			PLT_SWAP(arr1[x], arr1[x + 1]);
			PLT_SWAP(arr2[x], arr2[x + 1]);
			PLT_SWAP(arr3[x], arr3[x + 1]);
			x = -1;
		}
	}
}

static inline void
populate_sample(uint64_t tck[], uint64_t ns[], double diff[], uint64_t dst[],
		uint64_t req_tck, uint64_t clk_freq, double tck_ns, uint8_t sz,
		bool mov_fwd)
{
	int i;

	for (i = 0; i < sz; i++) {
		tck[i] = i ? tck[i - 1] : req_tck;
		do {
			mov_fwd ? tck[i]++ : tck[i]--;
			ns[i] = round((double)tck[i] * tck_ns);
			if (round((double)tck[i] * tck_ns) >
			    ((double)tck[i] * tck_ns))
				continue;
		} while (ns[i] % (uint64_t)cnxk_tim_ns_per_tck(clk_freq));
		diff[i] = PLT_MAX((double)ns[i], (double)tck[i] * tck_ns) -
			  PLT_MIN((double)ns[i], (double)tck[i] * tck_ns);
		dst[i] = mov_fwd ? tck[i] - req_tck : req_tck - tck[i];
	}
}

static void
tim_adjust_resolution(uint64_t *req_ns, uint64_t *req_tck, double tck_ns,
		      uint64_t clk_freq, uint64_t max_tmo, uint64_t m_tck)
{
#define MAX_SAMPLES 5
	double rmax_diff[MAX_SAMPLES], rmin_diff[MAX_SAMPLES];
	uint64_t min_tck[MAX_SAMPLES], max_tck[MAX_SAMPLES];
	uint64_t min_dst[MAX_SAMPLES], max_dst[MAX_SAMPLES];
	uint64_t min_ns[MAX_SAMPLES], max_ns[MAX_SAMPLES];
	int i;

	populate_sample(max_tck, max_ns, rmax_diff, max_dst, *req_tck, clk_freq,
			tck_ns, MAX_SAMPLES, true);
	sort_multi_array(rmax_diff, max_dst, max_tck, max_ns, MAX_SAMPLES);

	populate_sample(min_tck, min_ns, rmin_diff, min_dst, *req_tck, clk_freq,
			tck_ns, MAX_SAMPLES, false);
	sort_multi_array(rmin_diff, min_dst, min_tck, min_ns, MAX_SAMPLES);

	for (i = 0; i < MAX_SAMPLES; i++) {
		if (min_dst[i] < max_dst[i] && min_tck[i] > m_tck &&
		    (max_tmo / min_ns[i]) <=
			    (TIM_MAX_BUCKET_SIZE - TIM_MIN_BUCKET_SIZE)) {
			*req_tck = min_tck[i];
			*req_ns = min_ns[i];
			break;
		} else if ((max_tmo / max_ns[i]) <
			   (TIM_MAX_BUCKET_SIZE - TIM_MIN_BUCKET_SIZE)) {
			*req_tck = max_tck[i];
			*req_ns = max_ns[i];
			break;
		}
	}
}
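
/* Selection sketch for the adjusted resolution (a reading of the loop above,
 * not a hardware requirement): a below-request candidate wins when it is
 * closer to the request than the above-request candidate of the same rank,
 * stays above the minimum tick count (m_tck), and keeps the resulting bucket
 * count within TIM_MAX_BUCKET_SIZE - TIM_MIN_BUCKET_SIZE; otherwise the
 * first above-request candidate whose bucket count fits is chosen.
 */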
static int
cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
{
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
	uint64_t min_intvl_ns, min_intvl_cyc;
	struct cnxk_tim_ring *tim_ring;
	enum roc_tim_clk_src clk_src;
	uint64_t clk_freq = 0;
	int i, rc;

	if (dev == NULL)
		return -ENODEV;

	if (adptr->data->id >= dev->nb_rings)
		return -ENODEV;

	tim_ring = rte_zmalloc("cnxk_tim_prv", sizeof(struct cnxk_tim_ring), 0);
	if (tim_ring == NULL)
		return -ENOMEM;

	rc = roc_tim_lf_alloc(&dev->tim, adptr->data->id, NULL);
	if (rc < 0) {
		plt_err("Failed to create timer ring");
		goto tim_ring_free;
	}

	clk_src = cnxk_tim_convert_clk_src(rcfg->clk_src);
	if (clk_src == ROC_TIM_CLK_SRC_INVALID) {
		plt_err("Invalid clock source");
		rc = -EINVAL;
		goto tim_hw_free;
	}

	rc = cnxk_tim_get_clk_freq(dev, clk_src, &clk_freq);
	if (rc < 0) {
		plt_err("Failed to get clock frequency");
		goto tim_hw_free;
	}

	rc = roc_tim_lf_interval(&dev->tim, clk_src, clk_freq, &min_intvl_ns,
				 &min_intvl_cyc);
	if (rc < 0) {
		plt_err("Failed to get min interval details");
		goto tim_hw_free;
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) {
		/* Use 2 buckets to avoid contention */
		rcfg->timer_tick_ns /= 2;
		tim_ring->ena_periodic = 1;
	}

	if (rcfg->timer_tick_ns < min_intvl_ns) {
		if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) {
			rcfg->timer_tick_ns = min_intvl_ns;
		} else {
			rc = -ERANGE;
			goto tim_hw_free;
		}
	}

	if (tim_ring->ena_periodic)
		rcfg->max_tmo_ns = rcfg->timer_tick_ns * 2;

	if (rcfg->timer_tick_ns > rcfg->max_tmo_ns) {
		plt_err("Max timeout too high");
		rc = -ERANGE;
		goto tim_hw_free;
	}

	tim_ring->tck_nsec =
		round(RTE_ALIGN_MUL_NEAR((long double)rcfg->timer_tick_ns,
					 cnxk_tim_ns_per_tck(clk_freq)));
	if (log10(clk_freq) - floor(log10(clk_freq)) != 0.0) {
		uint64_t req_ns, req_tck;
		double tck_ns;

		req_ns = tim_ring->tck_nsec;
		tck_ns = NSECPERSEC / clk_freq;
		req_tck = round(rcfg->timer_tick_ns / tck_ns);
		tim_adjust_resolution(&req_ns, &req_tck, tck_ns, clk_freq,
				      rcfg->max_tmo_ns, min_intvl_cyc);
		if ((tim_ring->tck_nsec != req_ns) &&
		    !(rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
			rc = -ERANGE;
			goto tim_hw_free;
		}
		tim_ring->tck_nsec = ceil(req_tck * tck_ns);
	}

	tim_ring->tck_int = round((long double)tim_ring->tck_nsec /
				  cnxk_tim_ns_per_tck(clk_freq));
	tim_ring->tck_nsec =
		ceil(tim_ring->tck_int * cnxk_tim_ns_per_tck(clk_freq));

	tim_ring->ring_id = adptr->data->id;
	tim_ring->clk_src = clk_src;
	tim_ring->max_tout = rcfg->max_tmo_ns;
	tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
	tim_ring->nb_timers = rcfg->nb_timers;
	tim_ring->chunk_sz = dev->chunk_sz;
	tim_ring->disable_npa = dev->disable_npa;
	tim_ring->enable_stats = dev->enable_stats;

	for (i = 0; i < dev->ring_ctl_cnt; i++) {
		struct cnxk_tim_ctl *ring_ctl = &dev->ring_ctl_data[i];

		if (ring_ctl->ring == tim_ring->ring_id) {
			tim_ring->chunk_sz =
				ring_ctl->chunk_slots ?
					((uint32_t)(ring_ctl->chunk_slots + 1) *
					 CNXK_TIM_CHUNK_ALIGNMENT) :
					tim_ring->chunk_sz;
			tim_ring->enable_stats = ring_ctl->enable_stats;
			tim_ring->disable_npa = ring_ctl->disable_npa;
		}
	}

	if (tim_ring->disable_npa) {
		tim_ring->nb_chunks =
			tim_ring->nb_timers /
			CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
		tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
	} else {
		tim_ring->nb_chunks = tim_ring->nb_timers;
	}

	tim_ring->nb_chunk_slots = CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
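	/* Illustrative sizing (example numbers, not defaults): with
	 * max_tmo_ns = 1E9 (1 s) and tck_nsec = 100000 (100 us), nb_bkts is
	 * 1E9 / 100000 = 10000 buckets. With the NPA pool in use, one chunk
	 * is provisioned per timer; with NPA disabled, every bucket gets
	 * enough chunks to hold all nb_timers entries.
	 */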
	/* Create buckets. */
	tim_ring->bkt =
		rte_zmalloc("cnxk_tim_bucket",
			    (tim_ring->nb_bkts) * sizeof(struct cnxk_tim_bkt),
			    RTE_CACHE_LINE_SIZE);
	if (tim_ring->bkt == NULL) {
		rc = -ENOMEM;
		goto tim_hw_free;
	}

	rc = cnxk_tim_chnk_pool_create(tim_ring, rcfg);
	if (rc < 0)
		goto tim_bkt_free;

	rc = roc_tim_lf_config(&dev->tim, tim_ring->ring_id, clk_src,
			       tim_ring->ena_periodic, tim_ring->ena_dfb,
			       tim_ring->nb_bkts, tim_ring->chunk_sz,
			       tim_ring->tck_int, tim_ring->tck_nsec, clk_freq);
	if (rc < 0) {
		plt_err("Failed to configure timer ring");
		goto tim_chnk_free;
	}

	tim_ring->base = roc_tim_lf_base_get(&dev->tim, tim_ring->ring_id);
	plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
	plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

	/* Set fastpath ops. */
	cnxk_tim_set_fp_ops(tim_ring);

	/* Update SSO xae count. */
	cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
			      RTE_EVENT_TYPE_TIMER);
	cnxk_sso_xae_reconfigure(dev->event_dev);

	plt_tim_dbg(
		"Total memory used %" PRIu64 "MB\n",
		(uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz) +
			    (tim_ring->nb_bkts * sizeof(struct cnxk_tim_bkt))) /
			   BIT_ULL(20)));

	adptr->data->adapter_priv = tim_ring;
	return rc;

tim_chnk_free:
	rte_mempool_free(tim_ring->chunk_pool);
tim_bkt_free:
	rte_free(tim_ring->bkt);
tim_hw_free:
	roc_tim_lf_free(&dev->tim, tim_ring->ring_id);
tim_ring_free:
	rte_free(tim_ring);
	return rc;
}

static int
cnxk_tim_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

	if (dev == NULL)
		return -ENODEV;

	roc_tim_lf_free(&dev->tim, tim_ring->ring_id);
	rte_free(tim_ring->bkt);
	rte_mempool_free(tim_ring->chunk_pool);
	rte_free(tim_ring);

	return 0;
}

static void
cnxk_tim_calibrate_start_tsc(struct cnxk_tim_ring *tim_ring)
{
#define CNXK_TIM_CALIB_ITER 1E6
	uint32_t real_bkt, bucket;
	int icount, ecount = 0;
	uint64_t bkt_cyc;

	for (icount = 0; icount < CNXK_TIM_CALIB_ITER; icount++) {
		real_bkt = plt_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
		bkt_cyc = cnxk_tim_cntvct();
		bucket = (bkt_cyc - tim_ring->ring_start_cyc) /
			 tim_ring->tck_int;
		bucket = bucket % (tim_ring->nb_bkts);
		tim_ring->ring_start_cyc =
			bkt_cyc - (real_bkt * tim_ring->tck_int);
		if (bucket != real_bkt)
			ecount++;
	}
	tim_ring->last_updt_cyc = bkt_cyc;
	plt_tim_dbg("Bucket mispredict %3.2f distance %d\n",
		    100 - (((double)(icount - ecount) / (double)icount) * 100),
		    bucket - real_bkt);
}

static int
cnxk_tim_ring_start(const struct rte_event_timer_adapter *adptr)
{
	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
	int rc;

	if (dev == NULL)
		return -ENODEV;

	rc = roc_tim_lf_enable(&dev->tim, tim_ring->ring_id,
			       &tim_ring->ring_start_cyc, NULL);
	if (rc < 0)
		return rc;

	tim_ring->tot_int = tim_ring->tck_int * tim_ring->nb_bkts;
	tim_ring->fast_div = rte_reciprocal_value_u64(tim_ring->tck_int);
	tim_ring->fast_bkt = rte_reciprocal_value_u64(tim_ring->nb_bkts);

	cnxk_tim_calibrate_start_tsc(tim_ring);

	return rc;
}
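
/* tck_int and nb_bkts are divisors on the arm/expiry fast path, so ring
 * start precomputes their rte_reciprocal inverses above; the fast path (and
 * cnxk_tim_stats_get() below) can then use rte_reciprocal_divide_u64() to
 * turn each division into a multiply-and-shift.
 */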
static int
cnxk_tim_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
	int rc;

	if (dev == NULL)
		return -ENODEV;

	rc = roc_tim_lf_disable(&dev->tim, tim_ring->ring_id);
	if (rc < 0)
		plt_err("Failed to disable timer ring");

	return rc;
}

static int
cnxk_tim_stats_get(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer_adapter_stats *stats)
{
	struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;
	uint64_t bkt_cyc = cnxk_tim_cntvct() - tim_ring->ring_start_cyc;

	stats->evtim_exp_count =
		__atomic_load_n(&tim_ring->arm_cnt, __ATOMIC_RELAXED);
	stats->ev_enq_count = stats->evtim_exp_count;
	stats->adapter_tick_count =
		rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div);
	return 0;
}

static int
cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;

	__atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
	return 0;
}

int
cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
		  uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

	RTE_SET_USED(flags);

	if (dev == NULL)
		return -ENODEV;

	cnxk_tim_ops.init = cnxk_tim_ring_create;
	cnxk_tim_ops.uninit = cnxk_tim_ring_free;
	cnxk_tim_ops.start = cnxk_tim_ring_start;
	cnxk_tim_ops.stop = cnxk_tim_ring_stop;
	cnxk_tim_ops.get_info = cnxk_tim_ring_info_get;

	if (dev->enable_stats) {
		cnxk_tim_ops.stats_get = cnxk_tim_stats_get;
		cnxk_tim_ops.stats_reset = cnxk_tim_stats_reset;
	}

	/* Store evdev pointer for later use. */
	dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT |
		RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC;
	*ops = &cnxk_tim_ops;

	return 0;
}
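
/* Each "[ring-chunk_sz-disable_npa-enable_stats]" tuple below is decoded by
 * writing its '-'-separated tokens into the consecutive uint16_t fields of
 * struct cnxk_tim_ctl, in declaration order. Hypothetical example, assuming
 * the CNXK_TIM_RING_CTL kvarg is "tim_ring_ctl" as in the driver guide:
 * tim_ring_ctl=[2-1023-1-0] requests ring 2 with 1023 chunk slots, NPA
 * disabled and stats disabled.
 */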
static void
cnxk_tim_parse_ring_param(char *value, void *opaque)
{
	struct cnxk_tim_evdev *dev = opaque;
	struct cnxk_tim_ctl ring_ctl = {0};
	char *tok = strtok(value, "-");
	struct cnxk_tim_ctl *old_ptr;
	uint16_t *val;

	val = (uint16_t *)&ring_ctl;

	if (!strlen(value))
		return;

	while (tok != NULL) {
		*val = atoi(tok);
		tok = strtok(NULL, "-");
		val++;
	}

	if (val != (&ring_ctl.enable_stats + 1)) {
		plt_err("Invalid ring param expected [ring-chunk_sz-disable_npa-enable_stats]");
		return;
	}

	dev->ring_ctl_cnt++;
	old_ptr = dev->ring_ctl_data;
	dev->ring_ctl_data =
		rte_realloc(dev->ring_ctl_data,
			    sizeof(struct cnxk_tim_ctl) * dev->ring_ctl_cnt, 0);
	if (dev->ring_ctl_data == NULL) {
		dev->ring_ctl_data = old_ptr;
		dev->ring_ctl_cnt--;
		return;
	}

	dev->ring_ctl_data[dev->ring_ctl_cnt - 1] = ring_ctl;
}

static void
cnxk_tim_parse_ring_ctl_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	if (s == NULL || !strlen(s))
		goto free;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start && start < end) {
			*end = 0;
			cnxk_tim_parse_ring_param(start + 1, opaque);
			start = end;
			s = end;
		}
		s++;
	}

free:
	free(f);
}

static int
cnxk_tim_parse_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format [ring-chunk_sz-disable_npa-enable_stats] use '-' as ','
	 * isn't allowed. 0 represents default.
	 */
	cnxk_tim_parse_ring_ctl_list(value, opaque);

	return 0;
}

static void
cnxk_tim_parse_clk_list(const char *value, void *opaque)
{
	enum roc_tim_clk_src src[] = {ROC_TIM_CLK_SRC_GPIO, ROC_TIM_CLK_SRC_PTP,
				      ROC_TIM_CLK_SRC_SYNCE,
				      ROC_TIM_CLK_SRC_INVALID};
	struct cnxk_tim_evdev *dev = opaque;
	char *str = strdup(value);
	char *tok;
	int i = 0;

	if (str == NULL || !strlen(str))
		goto free;

	tok = strtok(str, "-");
	while (tok != NULL && src[i] != ROC_TIM_CLK_SRC_INVALID) {
		dev->ext_clk_freq[src[i]] = strtoull(tok, NULL, 10);
		tok = strtok(NULL, "-");
		i++;
	}

free:
	free(str);
}

static int
cnxk_tim_parse_kvargs_dsv(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* DSV format GPIO-PTP-SYNCE use '-' as ','
	 * isn't allowed. 0 represents default.
	 */
	cnxk_tim_parse_clk_list(value, opaque);

	return 0;
}
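
/* Usage sketch (device BDF and values are placeholders, and the kvarg names
 * assume the CNXK_TIM_* macros expand as in the driver guide): the kvargs
 * handled below are typically passed on the EAL allow list, e.g.
 *   -a 0002:0e:00.0,tim_chnk_slots=1023,tim_rings_lmt=5
 *   -a 0002:0e:00.0,tim_eclk_freq=122880000-1000000000-0
 * where tim_eclk_freq lists the GPIO, PTP and SYNCE clock frequencies in
 * the DSV format parsed above.
 */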
static void
cnxk_tim_parse_devargs(struct rte_devargs *devargs, struct cnxk_tim_evdev *dev)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, CNXK_TIM_DISABLE_NPA, &parse_kvargs_flag,
			   &dev->disable_npa);
	rte_kvargs_process(kvlist, CNXK_TIM_CHNK_SLOTS, &parse_kvargs_value,
			   &dev->chunk_slots);
	rte_kvargs_process(kvlist, CNXK_TIM_STATS_ENA, &parse_kvargs_flag,
			   &dev->enable_stats);
	rte_kvargs_process(kvlist, CNXK_TIM_RINGS_LMT, &parse_kvargs_value,
			   &dev->min_ring_cnt);
	rte_kvargs_process(kvlist, CNXK_TIM_RING_CTL,
			   &cnxk_tim_parse_kvargs_dict, dev);
	rte_kvargs_process(kvlist, CNXK_TIM_EXT_CLK, &cnxk_tim_parse_kvargs_dsv,
			   dev);

	rte_kvargs_free(kvlist);
}

void
cnxk_tim_init(struct roc_sso *sso)
{
	const struct rte_memzone *mz;
	struct cnxk_tim_evdev *dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	mz = rte_memzone_reserve(RTE_STR(CNXK_TIM_EVDEV_NAME),
				 sizeof(struct cnxk_tim_evdev), 0, 0);
	if (mz == NULL) {
		plt_tim_dbg("Unable to allocate memory for TIM Event device");
		return;
	}
	dev = mz->addr;

	cnxk_tim_parse_devargs(sso->pci_dev->device.devargs, dev);

	dev->tim.roc_sso = sso;
	dev->tim.nb_lfs = dev->min_ring_cnt;
	rc = roc_tim_init(&dev->tim);
	if (rc < 0) {
		plt_err("Failed to initialize roc tim resources");
		rte_memzone_free(mz);
		return;
	}
	dev->nb_rings = rc;

	if (dev->chunk_slots && dev->chunk_slots <= CNXK_TIM_MAX_CHUNK_SLOTS &&
	    dev->chunk_slots >= CNXK_TIM_MIN_CHUNK_SLOTS) {
		dev->chunk_sz =
			(dev->chunk_slots + 1) * CNXK_TIM_CHUNK_ALIGNMENT;
	} else {
		dev->chunk_sz = CNXK_TIM_RING_DEF_CHUNK_SZ;
	}
}

void
cnxk_tim_fini(void)
{
	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

	if (dev == NULL || rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	roc_tim_fini(&dev->tim);
	rte_memzone_free(rte_memzone_lookup(RTE_STR(CNXK_TIM_EVDEV_NAME)));
}