Lines Matching refs:sw (each hit lists the source line number, the matching code, and the enclosing function; declaration sites are tagged local or argument)

581 	struct swtim *sw;  in swtim_callback()  local
590 sw = swtim_pmd_priv(adapter); in swtim_callback()
592 ret = event_buffer_add(&sw->buffer, &evtim->ev); in swtim_callback()
598 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE, in swtim_callback()
604 sw->stats.evtim_retry_count++; in swtim_callback()
609 if (unlikely(sw->in_use[lcore].v == 0)) { in swtim_callback()
610 sw->in_use[lcore].v = 1; in swtim_callback()
611 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1, in swtim_callback()
613 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore, in swtim_callback()
622 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) { in swtim_callback()
623 rte_mempool_put_bulk(sw->tim_pool, in swtim_callback()
624 (void **)sw->expired_timers, in swtim_callback()
625 sw->n_expired_timers); in swtim_callback()
626 sw->n_expired_timers = 0; in swtim_callback()
629 sw->expired_timers[sw->n_expired_timers++] = tim; in swtim_callback()
630 sw->stats.evtim_exp_count++; in swtim_callback()
636 if (event_buffer_batch_ready(&sw->buffer)) { in swtim_callback()
637 event_buffer_flush(&sw->buffer, in swtim_callback()
643 sw->stats.ev_enq_count += nb_evs_flushed; in swtim_callback()
644 sw->stats.ev_inv_count += nb_evs_invalid; in swtim_callback()
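The swtim_callback() hits above show two batching patterns: expired timers are parked in sw->expired_timers and returned to sw->tim_pool only once EXP_TIM_BUF_SZ of them have accumulated, and enqueued events are flushed from sw->buffer only when a batch is ready. Below is a minimal sketch of the same collect-then-bulk-return idea in plain C; exp_buf, put_bulk and buffer_expired are illustrative stand-ins, not the DPDK API.

    #include <stddef.h>
    #include <stdio.h>

    #define EXP_TIM_BUF_SZ 4                 /* tiny on purpose, for illustration */

    struct exp_buf {
        void  *items[EXP_TIM_BUF_SZ];        /* expired objects awaiting return */
        size_t n;                            /* number currently buffered       */
    };

    /* Stand-in for rte_mempool_put_bulk(): hand a whole batch back at once. */
    static void put_bulk(void **items, size_t n)
    {
        printf("bulk-returning %zu objects\n", n);
        (void)items;
    }

    /* Buffer one expired object; when the buffer is already full, return the
     * previous batch in a single call before storing the new entry. */
    static void buffer_expired(struct exp_buf *b, void *obj)
    {
        if (b->n == EXP_TIM_BUF_SZ) {
            put_bulk(b->items, b->n);
            b->n = 0;
        }
        b->items[b->n++] = obj;
    }

    int main(void)
    {
        struct exp_buf b = { .n = 0 };
        int objs[10];

        for (int i = 0; i < 10; i++)
            buffer_expired(&b, &objs[i]);
        if (b.n > 0)                         /* drain whatever is left over */
            put_bulk(b.items, b.n);
        return 0;
    }

Returning objects in bulk amortizes the per-object cost of the pool operations, which is presumably why the callback defers rte_mempool_put_bulk() until the buffer is full rather than freeing each expired timer immediately.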
652 struct swtim *sw = swtim_pmd_priv(adapter); in get_timeout_cycles() local
653 uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns; in get_timeout_cycles()
661 swtim_did_tick(struct swtim *sw) in swtim_did_tick() argument
666 next_tick_cyclesp = &sw->next_tick_cycles; in swtim_did_tick()
667 cycles_per_adapter_tick = sw->timer_tick_ns * in swtim_did_tick()
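swtim_did_tick() appears to compare the current cycle count against sw->next_tick_cycles, with the tick length derived from sw->timer_tick_ns and a cycles-per-nanosecond factor not shown in the listing. A hedged sketch of that kind of elapsed-tick check, using clock_gettime() in place of the cycle counter; did_tick() and now_ns() are invented names:

    #define _POSIX_C_SOURCE 200809L
    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* Monotonic time in ns; stands in for the cycle counter the adapter reads. */
    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* True once a full tick has elapsed; advances the next-tick deadline. */
    static bool did_tick(uint64_t *next_tick_ns, uint64_t tick_len_ns)
    {
        uint64_t cur = now_ns();

        if (cur < *next_tick_ns)
            return false;
        while (*next_tick_ns <= cur)         /* skip over any missed ticks */
            *next_tick_ns += tick_len_ns;
        return true;
    }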
694 struct swtim *sw = swtim_pmd_priv(adapter); in check_timeout() local
696 tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns; in check_timeout()
697 if (tmo_nsec > sw->max_tmo_ns) in check_timeout()
699 if (tmo_nsec < sw->timer_tick_ns) in check_timeout()
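check_timeout() and get_timeout_cycles() both scale evtim->timeout_ticks by sw->timer_tick_ns; the check then rejects values above sw->max_tmo_ns or below a single tick. A small sketch of that range check follows; the enum values and function name are illustrative, since the listing does not show the actual return codes:

    #include <stdint.h>

    enum tmo_check { TMO_OK = 0, TMO_TOO_LATE = -1, TMO_TOO_EARLY = -2 };

    /* Scale a tick count to nanoseconds and bound it the way the listing
     * shows: no larger than the adapter maximum, no smaller than one tick. */
    static enum tmo_check
    check_timeout_ns(uint64_t timeout_ticks, uint64_t tick_ns, uint64_t max_tmo_ns)
    {
        uint64_t tmo_ns = timeout_ticks * tick_ns;

        if (tmo_ns > max_tmo_ns)
            return TMO_TOO_LATE;             /* past the configured max_tmo_ns */
        if (tmo_ns < tick_ns)
            return TMO_TOO_EARLY;            /* finer than one timer_tick_ns   */
        return TMO_OK;
    }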
731 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_service_func() local
735 if (swtim_did_tick(sw)) { in swtim_service_func()
736 rte_timer_alt_manage(sw->timer_data_id, in swtim_service_func()
737 sw->poll_lcores, in swtim_service_func()
738 sw->n_poll_lcores, in swtim_service_func()
742 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers, in swtim_service_func()
743 sw->n_expired_timers); in swtim_service_func()
744 sw->n_expired_timers = 0; in swtim_service_func()
746 event_buffer_flush(&sw->buffer, in swtim_service_func()
752 sw->stats.ev_enq_count += nb_evs_flushed; in swtim_service_func()
753 sw->stats.ev_inv_count += nb_evs_invalid; in swtim_service_func()
754 sw->stats.adapter_tick_count++; in swtim_service_func()
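Taken together, the swtim_service_func() hits outline one pass of the polled service: do nothing unless a full adapter tick has elapsed, run rte_timer_alt_manage() over the registered poll lcores, bulk-return whatever expired, flush the event buffer, and bump the statistics. A compressed outline of that flow, with trivial stand-in functions in place of the DPDK calls:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Trivial stand-ins for rte_timer_alt_manage(), rte_mempool_put_bulk()
     * and event_buffer_flush(); the real adapter does its work in these. */
    static bool did_tick(void)                  { return true; }
    static void expire_due_timers(void)         { }
    static void put_bulk(void **o, size_t n)    { (void)o; (void)n; }
    static void flush_events(void)              { }

    /* One pass of the polled service, in the order the listing shows:
     * skip the pass unless a tick elapsed, expire due timers (which fills
     * the expired array and the event buffer), return the expired timers
     * in one bulk call, flush the events, then count the tick. */
    static void service_iteration(void **expired, size_t *n_expired,
                                  uint64_t *tick_count)
    {
        if (!did_tick())
            return;

        expire_due_timers();

        if (*n_expired > 0) {
            put_bulk(expired, *n_expired);
            *n_expired = 0;
        }

        flush_events();
        (*tick_count)++;
    }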
794 struct swtim *sw; in swtim_init() local
803 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE, in swtim_init()
805 if (sw == NULL) { in swtim_init()
812 adapter->data->adapter_priv = sw; in swtim_init()
813 sw->adapter = adapter; in swtim_init()
815 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns; in swtim_init()
816 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns; in swtim_init()
828 sw->tim_pool = rte_mempool_create(pool_name, pool_size, in swtim_init()
831 if (sw->tim_pool == NULL) { in swtim_init()
839 sw->in_use[i].v = 0; in swtim_init()
851 ret = rte_timer_data_alloc(&sw->timer_data_id); in swtim_init()
859 event_buffer_init(&sw->buffer); in swtim_init()
861 sw->adapter = adapter; in swtim_init()
871 ret = rte_service_component_register(&service, &sw->service_id); in swtim_init()
874 ": err = %d", service.name, sw->service_id, in swtim_init()
882 sw->service_id); in swtim_init()
884 adapter->data->service_id = sw->service_id; in swtim_init()
889 rte_mempool_free(sw->tim_pool); in swtim_init()
891 rte_free(sw); in swtim_init()
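The swtim_init() hits show the setup order (zeroed private allocation, timer mempool, timer data id, event buffer, service registration) and an error path that frees the mempool and then the private structure. A generic sketch of that allocate-in-order, unwind-in-reverse pattern with goto labels; adapter_priv, register_service and the label names are made up for illustration:

    #include <stdlib.h>

    /* Stand-in for rte_service_component_register(); always succeeds here. */
    static int register_service(int *service_id) { *service_id = 0; return 0; }

    struct adapter_priv {
        void *tim_pool;                      /* ~ sw->tim_pool   */
        int   service_id;                    /* ~ sw->service_id */
    };

    /* Allocate, create the timer pool, register the service (same order as
     * the listing) and unwind in reverse if any step fails, ending with the
     * equivalent of rte_mempool_free() followed by rte_free(sw). */
    static struct adapter_priv *adapter_init(void)
    {
        struct adapter_priv *p = calloc(1, sizeof(*p)); /* ~ rte_zmalloc_socket() */
        if (p == NULL)
            return NULL;

        p->tim_pool = malloc(4096);                     /* ~ rte_mempool_create() */
        if (p->tim_pool == NULL)
            goto free_alloc;

        if (register_service(&p->service_id) != 0)
            goto free_mempool;

        return p;

    free_mempool:
        free(p->tim_pool);
    free_alloc:
        free(p);
        return NULL;
    }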
898 struct swtim *sw = arg; in swtim_free_tim() local
900 rte_mempool_put(sw->tim_pool, tim); in swtim_free_tim()
910 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_uninit() local
913 rte_timer_stop_all(sw->timer_data_id, in swtim_uninit()
914 sw->poll_lcores, in swtim_uninit()
915 sw->n_poll_lcores, in swtim_uninit()
917 sw); in swtim_uninit()
919 ret = rte_service_component_unregister(sw->service_id); in swtim_uninit()
925 rte_mempool_free(sw->tim_pool); in swtim_uninit()
926 rte_free(sw); in swtim_uninit()
951 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_start() local
960 mapped_count = get_mapped_count_for_service(sw->service_id); in swtim_start()
965 return rte_service_component_runstate_set(sw->service_id, 1); in swtim_start()
972 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_stop() local
974 ret = rte_service_component_runstate_set(sw->service_id, 0); in swtim_stop()
979 while (rte_service_may_be_active(sw->service_id)) in swtim_stop()
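swtim_stop() first sets the service runstate to 0 and then spins while rte_service_may_be_active() still reports the service running, so no callback is mid-flight when the adapter is torn down. A sketch of that stop-and-quiesce pattern using C11 atomics and nanosleep(); the two flags are illustrative, not the rte_service API:

    #define _POSIX_C_SOURCE 200809L
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    static atomic_bool service_enabled = true;   /* ~ the service runstate      */
    static atomic_bool service_active  = false;  /* ~ "may be active" indicator */

    /* Disable the service, then wait until no invocation is still running,
     * mirroring the runstate_set(..., 0) + rte_service_may_be_active() loop. */
    static void stop_and_quiesce(void)
    {
        struct timespec pause = { 0, 1000000 };  /* poll every 1 ms */

        atomic_store(&service_enabled, false);
        while (atomic_load(&service_active))
            nanosleep(&pause, NULL);
    }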
989 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_get_info() local
990 adapter_info->min_resolution_ns = sw->timer_tick_ns; in swtim_get_info()
991 adapter_info->max_tmo_ns = sw->max_tmo_ns; in swtim_get_info()
998 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_stats_get() local
999 *stats = sw->stats; /* structure copy */ in swtim_stats_get()
1006 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_stats_reset() local
1007 memset(&sw->stats, 0, sizeof(sw->stats)); in swtim_stats_reset()
1017 struct swtim *sw = swtim_pmd_priv(adapter); in __swtim_arm_burst() local
1046 if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v, in __swtim_arm_burst()
1051 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1, in __swtim_arm_burst()
1053 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id, in __swtim_arm_burst()
1057 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims, in __swtim_arm_burst()
1106 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles, in __swtim_arm_burst()
1125 rte_mempool_put_bulk(sw->tim_pool, in __swtim_arm_burst()
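In __swtim_arm_burst(), the first time a given lcore arms a timer it flips sw->in_use[lcore_id].v from 0 to 1 with an atomic compare-exchange and appends itself to sw->poll_lcores, so the service thread will manage that lcore's timer list from then on; the timers themselves come from a bulk mempool get and are bulk-returned if arming fails. A sketch of just the lazy lcore-registration part with C11 atomics (the array sizes and names are illustrative):

    #include <stdatomic.h>

    #define MAX_LCORES 128

    static atomic_uint in_use[MAX_LCORES];       /* 0 = this lcore never armed a timer */
    static atomic_uint poll_lcores[MAX_LCORES];  /* lcore ids the service should poll  */
    static atomic_uint n_poll_lcores;

    /* On an lcore's first arm, flip its flag 0 -> 1 with a compare-exchange,
     * grab a slot with fetch-add, and publish the lcore id so the service
     * starts managing that lcore's timers. Later calls from the same lcore
     * fail the CAS and skip straight past the registration. */
    static void register_polling_lcore(unsigned int lcore_id)
    {
        unsigned int expected = 0;

        if (atomic_compare_exchange_strong(&in_use[lcore_id], &expected, 1)) {
            unsigned int slot = atomic_fetch_add(&n_poll_lcores, 1);
            atomic_store(&poll_lcores[slot], lcore_id);
        }
    }

Doing the registration lazily keeps arming cheap on the common path: after the first call from an lcore the compare-exchange fails immediately and no shared state is written.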
1147 struct swtim *sw = swtim_pmd_priv(adapter); in swtim_cancel_burst() local
1176 ret = rte_timer_alt_stop(sw->timer_data_id, timp); in swtim_cancel_burst()
1183 rte_mempool_put(sw->tim_pool, (void **)timp); in swtim_cancel_burst()