/dpdk/app/test/
test_rcu_qsbr_perf.c
     60: uint64_t begin, cycles;   (local in test_rcu_qsbr_reader_perf())
     83: cycles = rte_rdtsc_precise() - begin;   (in test_rcu_qsbr_reader_perf())
    101: uint64_t begin, cycles;   (local in test_rcu_qsbr_writer_perf())
    115: cycles = rte_rdtsc_precise() - begin;   (in test_rcu_qsbr_writer_perf())
    116: __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);   (in test_rcu_qsbr_writer_perf())
    289: uint64_t begin, cycles;   (local in test_rcu_qsbr_hash_reader())
    319: cycles = rte_rdtsc_precise() - begin;   (in test_rcu_qsbr_hash_reader())
    385: uint64_t token, begin, cycles;   (local in test_rcu_qsbr_sw_sv_1qs())
    455: cycles = rte_rdtsc_precise() - begin;   (in test_rcu_qsbr_sw_sv_1qs())
    504: uint64_t token, begin, cycles;   (local in test_rcu_qsbr_sw_sv_1qs_non_blocking())
    [all …]
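
The perf tests in this directory share one measurement idiom: bracket the work with rte_rdtsc_precise() and fold each lcore's cycle count into a shared total. A minimal sketch of that pattern, with `gcycles` and the `workload` callback as stand-ins for each test's own accumulator and body:

```c
#include <stdint.h>
#include <rte_cycles.h>

/* Shared accumulator; each worker lcore adds its measured cycles. */
static uint64_t gcycles;

/* Snapshot the TSC with rte_rdtsc_precise() (serializing, so the
 * measured work cannot drift outside the brackets), run the workload,
 * then accumulate the delta atomically because many lcores report at
 * once.
 */
static void
measure_worker(void (*workload)(void))
{
	uint64_t begin, cycles;

	begin = rte_rdtsc_precise();
	workload();                       /* the code under test */
	cycles = rte_rdtsc_precise() - begin;

	__atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
}
```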
|
test_trace_perf.c
    49: double cycles, ns;   (local in measure_perf())
    57: cycles = total_calls ? (double)total_cycles / (double)total_calls : 0;   (in measure_perf())
    58: cycles /= STEP;   (in measure_perf())
    59: cycles /= 100; /* CENT_OPS */   (in measure_perf())
    61: ns = (cycles / (double)hz) * 1E9;   (in measure_perf())
    62: printf("%16s: cycles=%f ns=%f\n", str, cycles, ns);   (in measure_perf())
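
The measure_perf() lines above also show the standard cycles-to-nanoseconds step. A sketch of that conversion, with total_cycles/total_calls standing in for the test's counters (the real function additionally divides by STEP and by 100 to account for its batched calls):

```c
#include <stdio.h>
#include <stdint.h>
#include <rte_cycles.h>

static void
report(const char *str, uint64_t total_cycles, uint64_t total_calls)
{
	uint64_t hz = rte_get_tsc_hz();   /* TSC ticks per second */
	double cycles, ns;

	/* Average cycles per call, guarding against division by zero. */
	cycles = total_calls ? (double)total_cycles / (double)total_calls : 0;
	/* One tick lasts 1e9 / hz nanoseconds. */
	ns = (cycles / (double)hz) * 1E9;
	printf("%16s: cycles=%f ns=%f\n", str, cycles, ns);
}
```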
|
test_efd_perf.c
     84: static uint64_t cycles[NUM_KEYSIZES][NUM_OPERATIONS];   (variable)
    201: cycles[params->cycle][ADD] = time_taken / KEYS_TO_ADD;   (in timed_adds())
    234: cycles[params->cycle][LOOKUP] = time_taken / NUM_LOOKUPS;   (in timed_lookups())
    278: cycles[params->cycle][LOOKUP_MULTI] = time_taken / NUM_LOOKUPS;   (in timed_lookups_multi())
    307: cycles[params->cycle][DELETE] = time_taken / KEYS_TO_ADD;   (in timed_deletes())
    379: printf("%-18"PRIu64, cycles[i][j]);   (in run_all_tbl_perf_tests())
|
test_hash_readwrite.c
     62: uint64_t begin, cycles;   (local in test_hash_readwrite_worker())
    112: cycles = rte_rdtsc_precise() - begin;   (in test_hash_readwrite_worker())
    113: __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);   (in test_hash_readwrite_worker())
    295: uint64_t begin, cycles;   (local in test_rw_reader())
    312: cycles = rte_rdtsc_precise() - begin;   (in test_rw_reader())
    313: __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);   (in test_rw_reader())
    323: uint64_t begin, cycles;   (local in test_rw_writer())
    346: cycles = rte_rdtsc_precise() - begin;   (in test_rw_writer())
    347: __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);   (in test_rw_writer())
|
test_member_perf.c
     79: static uint64_t cycles[NUM_TYPE][NUM_KEYSIZES][NUM_OPERATIONS];   (variable)
    242: cycles[type][params->cycle][ADD] = time_taken / KEYS_TO_ADD;   (in timed_adds())
    277: cycles[type][params->cycle][LOOKUP] = time_taken / NUM_LOOKUPS;   (in timed_lookups())
    324: cycles[type][params->cycle][LOOKUP_BULK] = time_taken / NUM_LOOKUPS;   (in timed_lookups_bulk())
    364: cycles[type][params->cycle][LOOKUP_MULTI] = time_taken / NUM_LOOKUPS;   (in timed_lookups_multimatch())
    418: cycles[type][params->cycle][LOOKUP_MULTI_BULK] = time_taken / …   (in timed_lookups_multimatch_bulk())
    445: cycles[type][params->cycle][DELETE] = time_taken / KEYS_TO_ADD;   (in timed_deletes())
    491: cycles[type][params->cycle][LOOKUP_MISS] = time_taken / NUM_LOOKUPS;   (in timed_miss_lookup())
    596: printf("%-18"PRIu64, cycles[j][i][k]);   (in run_all_tbl_perf_tests())
|
test_hash_multiwriter.c
    57: uint64_t begin, cycles;   (local in test_hash_multiwriter_worker())
    86: cycles = rte_rdtsc_precise() - begin;   (in test_hash_multiwriter_worker())
    87: __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);   (in test_hash_multiwriter_worker())
|
test_hash_perf.c
     59: static uint64_t cycles[NUM_KEYSIZES][NUM_OPERATIONS][2][2];   (variable)
    306: cycles[table_index][OP_ADD][with_hash][with_data] = time_taken/keys_to_add;   (in timed_adds())
    383: cycles[table_index][OP_LOOKUP][with_hash][with_data] = time_taken/num_lookups;   (in timed_lookups())
    509: cycles[table_index][OP_LOOKUP_MULTI][with_hash][with_data] = …   (in timed_lookups_multi())
    548: cycles[table_index][OP_DELETE][with_hash][with_data] = time_taken/keys_to_add;   (in timed_deletes())
    626: printf("%-18"PRIu64, cycles[i][j][with_hash][with_data]);   (in run_all_tbl_perf_tests())
|
test_hash_readwrite_lf_perf.c
     568: uint64_t begin, cycles;   (local in test_rwc_reader())
     653: cycles = rte_rdtsc_precise() - begin;   (in test_rwc_reader())
     654: __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);   (in test_rwc_reader())
    1271: uint64_t begin, cycles;   (local in test_hash_rcu_qsbr_writer())
    1283: cycles = rte_rdtsc_precise() - begin;   (in test_hash_rcu_qsbr_writer())
    1284: __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);   (in test_hash_rcu_qsbr_writer())
|
/dpdk/lib/graph/
graph_stats.c
     63: const uint64_t cycles = stat->cycles;   (local in print_node())
     72: call_delta ? (double)((cycles - stat->prev_cycles) / call_delta)   (in print_node())
    333: uint64_t calls = 0, cycles = 0, objs = 0, realloc_count = 0;   (local in cluster_node_arregate_stats())
    343: cycles += node->total_cycles;   (in cluster_node_arregate_stats())
    349: stat->cycles = cycles;   (in cluster_node_arregate_stats())
    362: stat->prev_cycles = stat->cycles;   (in cluster_node_store_prev_stats())
    400: node->cycles = 0;   (in rte_graph_cluster_stats_reset())
|
rte_graph.h
    200: uint64_t cycles; /**< Current number of cycles. */   (member)
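
Together with graph_stats.c above, this member backs the per-node rate display: print_node() divides the cycle delta since the previous stats snapshot by the call delta. A sketch of that computation, assuming the prev_calls/prev_cycles members of struct rte_graph_cluster_node_stats matched above; it is a simplification of the library's own arithmetic, not a copy of it:

```c
#include <stdint.h>
#include <rte_graph.h>

/* Cycles spent per node call during the last stats interval.
 * prev_* hold the values saved by the previous stats pass.
 */
static double
cycles_per_call(const struct rte_graph_cluster_node_stats *stat)
{
	uint64_t call_delta = stat->calls - stat->prev_calls;

	return call_delta ?
		(double)(stat->cycles - stat->prev_cycles) / call_delta : 0;
}
```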
|
/dpdk/examples/rxtx_callbacks/
main.c
     74: uint64_t cycles = 0;   (local in calc_latency())
     84: cycles += now - *tsc_field(pkts[i]);   (in calc_latency())
     89: latency_numbers.total_cycles += cycles;   (in calc_latency())
    199: uint64_t cycles = rte_rdtsc();   (local in port_init())
    202: uint64_t c_freq = cycles - cycles_base;   (in port_init())
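
A sketch of the latency math in calc_latency(): each packet carries the TSC value stamped at RX in a dynamic mbuf field, and at TX the callback sums "now - stamp" over the burst. Here tsc_field() and tsc_dynfield_offset mirror the example's own accessor for that field (registered elsewhere via rte_mbuf_dynfield_register()) and are shown as assumptions:

```c
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Offset of the per-packet TSC stamp, registered at startup. */
extern int tsc_dynfield_offset;

static inline uint64_t *
tsc_field(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, tsc_dynfield_offset, uint64_t *);
}

/* Sum the RX-to-TX cycle latency over one burst. */
static uint64_t
burst_latency_cycles(struct rte_mbuf *pkts[], uint16_t nb_pkts)
{
	uint64_t now = rte_rdtsc();
	uint64_t cycles = 0;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		cycles += now - *tsc_field(pkts[i]);

	return cycles; /* caller adds this to a running total */
}
```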
|
/dpdk/lib/pcapng/
rte_pcapng.c
     39: uint64_t cycles;   (member)
     46: pcapng_time.cycles = rte_get_tsc_cycles();   (in RTE_INIT())
     52: static uint64_t pcapng_tsc_to_ns(uint64_t cycles)   (argument in pcapng_tsc_to_ns())
     56: delta = cycles - pcapng_time.cycles;   (in pcapng_tsc_to_ns())
    413: uint32_t length, uint64_t cycles,   (argument in rte_pcapng_copy())
    426: ns = pcapng_tsc_to_ns(cycles);   (in rte_pcapng_copy())
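
A sketch of the conversion behind pcapng_tsc_to_ns(): record a paired (wall-clock ns, TSC) baseline once at startup, then turn any later TSC sample into nanoseconds by scaling the elapsed ticks. The struct mirrors the members matched above; the real code also guards the multiplication against 64-bit overflow on long uptimes, which this sketch omits:

```c
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_time.h>   /* NSEC_PER_SEC */

static struct {
	uint64_t ns;      /* wall-clock ns captured at init */
	uint64_t cycles;  /* TSC value captured at the same instant */
} pcapng_time;

static uint64_t
tsc_to_ns(uint64_t cycles)
{
	/* Ticks elapsed since the baseline, scaled to nanoseconds. */
	uint64_t delta = cycles - pcapng_time.cycles;

	return pcapng_time.ns + delta * NSEC_PER_SEC / rte_get_tsc_hz();
}
```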
|
/dpdk/lib/eal/include/
rte_time.h
    40: rte_cyclecounter_cycles_to_ns(struct rte_timecounter *tc, uint64_t cycles)   (argument in rte_cyclecounter_cycles_to_ns())
    45: ns = cycles + tc->nsec_frac;   (in rte_cyclecounter_cycles_to_ns())
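
A sketch of how this helper is typically consumed: rte_timecounter_update(), also declared in rte_time.h, feeds it the cycle delta since the last call and folds the result into the timecounter's running nanosecond count. The fixed-point remainder kept in nsec_frac is what lets repeated conversions avoid losing sub-nanosecond precision. The wrapper function name is illustrative:

```c
#include <stdint.h>
#include <rte_time.h>

/* Return the running nanosecond count maintained by tc, given the
 * device's current raw cycle counter value.
 */
static uint64_t
poll_device_time(struct rte_timecounter *tc, uint64_t hw_counter_now)
{
	return rte_timecounter_update(tc, hw_counter_now);
}
```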
|
/dpdk/doc/guides/howto/
debug_troubleshoot.rst
      9: hardware devices which helps in offloading CPU cycles too. It is common to find
     85: lcore threads has enough cycles for ``rte_eth_rx_burst`` on the port queue
     89: threads gets enough cycles.
    100: distributor, or event RX adapter not having enough cycles.
    127: thread has enough cycles to consume the packets from the queue.
    203: * Heavy processing cycles at single or multiple processing stages.
    210: * Extra cycles to linearize multi-segment buffer and software offload like
    321: Is the execution cycles for dynamic service functions are not frequent?
    342: * If service function execution cycles for dynamic service functions are
    388: insufficient CPU cycles. Use ``rte_tm_capabilities_get`` to fetch features
|
/dpdk/doc/guides/tools/
comp_perf.rst
    17: to check the throughput rate (showing cycles/iteration, cycles/Byte and Gbps,
    20: the number of cycles per operation for the 3 phases: setup, enqueue_burst and
    22: inserted between enqueue and dequeue so no cycles are wasted in retries while
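
A worked form of the figures the tool reports: cycles/Byte is the measured cycle count over the bytes processed, and Gbps follows from the same cycle count via the TSC frequency. The function and its counters are stand-ins, not the tool's code:

```c
#include <stdint.h>

static void
throughput_figures(uint64_t total_cycles, uint64_t total_bytes,
		   uint64_t tsc_hz, double *cycles_per_byte, double *gbps)
{
	/* Elapsed wall time implied by the cycle count. */
	double seconds = (double)total_cycles / (double)tsc_hz;

	*cycles_per_byte = (double)total_cycles / (double)total_bytes;
	*gbps = ((double)total_bytes * 8) / seconds / 1E9;
}
```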
|
/dpdk/doc/guides/sample_app_ug/
rxtx_callbacks.rst
     14: prior to transmission to calculate the elapsed time, in CPU cycles.
    108: each packet (see the *cycles* section of the *DPDK API Documentation* for
    124: the total number of cycles used. Once more than 100 million packets have been
|
eventdev_pipeline.rst
    13: configured for various numbers worker cores, stages,queue depths and cycles per
    45: * ``-W1000``: do 1000 cycles of work per packet in each stage
|
/dpdk/app/test-eventdev/
test_order_common.c
    292: uint64_t cycles = rte_get_timer_cycles();   (local in order_launch_lcores())
    304: if (new_cycles - cycles > rte_get_timer_hz() * 1) {   (in order_launch_lcores())
    314: cycles = new_cycles;   (in order_launch_lcores())
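
A sketch of the once-per-second polling pattern in order_launch_lcores() (the selftest wait_workers_to_join() loops further below use the same idiom with a longer deadline): compare elapsed rte_get_timer_cycles() ticks against rte_get_timer_hz(). done() is a placeholder for the real progress check:

```c
#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>

static void
poll_every_second(bool (*done)(void))
{
	uint64_t cycles = rte_get_timer_cycles();

	while (!done()) {
		uint64_t new_cycles = rte_get_timer_cycles();

		/* rte_get_timer_hz() ticks correspond to one second. */
		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			/* one second elapsed: print progress, check
			 * for deadlock, etc. */
			cycles = new_cycles;
		}
	}
}
```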
|
/dpdk/doc/guides/mempool/
cnxk.rst
    21: - Ethdev Rx buffer allocation in HW to save CPU cycles in the Rx path.
    22: - Ethdev Tx buffer recycling in HW to save CPU cycles in the Tx path.
|
/dpdk/doc/guides/prog_guide/
service_cores.rst
    13: require CPU cycles to operate) and service cores (DPDK lcores, tasked with
    52: of calls to a specific service, and number of cycles used by the service. The
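
A sketch of reading the per-service counters this guide mentions, using rte_service_attr_get() with the call-count and cycle attributes from rte_service.h; error handling is elided:

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_service.h>

static void
print_service_load(uint32_t service_id)
{
	uint64_t calls = 0, cycles = 0;

	/* Both attributes accumulate since the last stats reset. */
	rte_service_attr_get(service_id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
	rte_service_attr_get(service_id, RTE_SERVICE_ATTR_CYCLES, &cycles);

	printf("service %u: calls=%" PRIu64 " cycles=%" PRIu64 "\n",
	       service_id, calls, cycles);
}
```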
|
/dpdk/drivers/event/dpaa2/
dpaa2_eventdev_selftest.c
    466: uint64_t cycles, print_cycles;   (local in wait_workers_to_join())
    470: print_cycles = cycles = rte_get_timer_cycles();   (in wait_workers_to_join())
    479: if (new_cycles - cycles > rte_get_timer_hz() * 10) {   (in wait_workers_to_join())
    485: cycles = new_cycles;   (in wait_workers_to_join())
|
/dpdk/app/test-regex/
main.c
     53: uint64_t cycles;   (member)
    516: qp->cycles = 0;   (in run_regex())
    562: qp->cycles += …   (in run_regex())
    571: time = (long double)qp->cycles / rte_get_timer_hz();   (in run_regex())
|
/dpdk/drivers/common/dpaax/
compat.h
    204: static inline void cpu_spin(int cycles)   (argument in cpu_spin())
    208: while (mfatb() < (now + cycles))   (in cpu_spin())
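
A portable equivalent of dpaax's cpu_spin(): busy-wait for a given number of timebase ticks. mfatb() reads the platform timebase inside the dpaax compat layer; rte_rdtsc() plays the same role in this sketch, so this is a generic illustration rather than the driver's code:

```c
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_pause.h>

static inline void
cpu_spin_generic(uint64_t cycles)
{
	uint64_t now = rte_rdtsc();

	while (rte_rdtsc() < now + cycles)
		rte_pause(); /* hint to the CPU that we are spinning */
}
```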
|
/dpdk/drivers/event/cnxk/
cnxk_eventdev_selftest.c
    565: uint64_t cycles, print_cycles;   (local in wait_workers_to_join())
    567: cycles = rte_get_timer_cycles();   (in wait_workers_to_join())
    568: print_cycles = cycles;   (in wait_workers_to_join())
    577: if (new_cycles - cycles > rte_get_timer_hz() * 10000000000) {   (in wait_workers_to_join())
    581: cycles = new_cycles;   (in wait_workers_to_join())
|
/dpdk/drivers/event/octeontx/
ssovf_evdev_selftest.c
    578: uint64_t cycles, print_cycles;   (local in wait_workers_to_join())
    581: print_cycles = cycles = rte_get_timer_cycles();   (in wait_workers_to_join())
    590: if (new_cycles - cycles > rte_get_timer_hz() * 10) {   (in wait_workers_to_join())
    596: cycles = new_cycles;   (in wait_workers_to_join())
|