/f-stack/dpdk/app/test/

test_trace_perf.c
    50  unsigned int workers;  in measure_perf() (local)
    52  for (workers = 0; workers < data->nb_workers; workers++) {  in measure_perf()
    53  total_cycles += data->ldata[workers].total_cycles;  in measure_perf()
    54  total_calls += data->ldata[workers].total_calls;  in measure_perf()
    68  unsigned int workers;  in wait_till_workers_are_ready() (local)
    70  for (workers = 0; workers < data->nb_workers; workers++)  in wait_till_workers_are_ready()
    71  while (!data->ldata[workers].started)  in wait_till_workers_are_ready()
    78  unsigned int workers;  in signal_workers_to_finish() (local)
    80  for (workers = 0; workers < data->nb_workers; workers++) {  in signal_workers_to_finish()
    81  data->ldata[workers].done = 1;  in signal_workers_to_finish()
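
The hits above trace a simple flag-based rendezvous between a launcher and its worker lcores: each worker raises `started` once it is running, and the launcher raises `done` to stop them. A minimal sketch of that pattern, with illustrative struct and field names rather than the file's actual definitions (production code would use C11 atomics or DPDK barriers instead of plain volatile):

```c
#include <stdint.h>

#define MAX_WORKERS 16 /* illustrative cap */

/* Hypothetical per-worker state, modeled on the ldata[] usage above. */
struct worker_ldata {
	volatile uint8_t started;  /* set by the worker once it is running */
	volatile uint8_t done;     /* set by the launcher to stop the worker */
	uint64_t total_cycles;
	uint64_t total_calls;
};

struct test_data {
	unsigned int nb_workers;
	struct worker_ldata ldata[MAX_WORKERS];
};

/* Spin until every launched worker has flagged itself as started. */
static void
wait_till_workers_are_ready(struct test_data *data)
{
	unsigned int workers;

	for (workers = 0; workers < data->nb_workers; workers++)
		while (!data->ldata[workers].started)
			;
}

/* Ask every worker loop to exit. */
static void
signal_workers_to_finish(struct test_data *data)
{
	unsigned int workers;

	for (workers = 0; workers < data->nb_workers; workers++)
		data->ldata[workers].done = 1;
}
```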
|
/f-stack/dpdk/drivers/crypto/scheduler/

scheduler_roundrobin.c
    12  struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];  (member)
    25  struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];  in schedule_enqueue()
    71  if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops  in schedule_dequeue()
    81  } while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops  in schedule_dequeue()
    85  worker = &rr_qp_ctx->workers[last_worker_idx];  in schedule_dequeue()
   146  memset(rr_qp_ctx->workers, 0,  in scheduler_start()
   150  rr_qp_ctx->workers[j].dev_id =  in scheduler_start()
   151  sched_ctx->workers[j].dev_id;  in scheduler_start()
   152  rr_qp_ctx->workers[j].qp_id = i;  in scheduler_start()
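
The enqueue path above cycles an index through the workers array so successive bursts land on successive workers. The selection step reduces to a sketch like this (context and field names are illustrative, not the driver's actual ones):

```c
#include <stdint.h>

struct rr_sched_ctx {
	uint32_t last_worker_idx;  /* next worker to receive a burst */
	uint32_t nb_workers;
};

/* Advance the round-robin cursor and return the worker to use now. */
static inline uint32_t
rr_pick_worker(struct rr_sched_ctx *ctx)
{
	uint32_t idx = ctx->last_worker_idx;

	ctx->last_worker_idx = (idx + 1) % ctx->nb_workers;
	return idx;
}
```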
|
scheduler_pmd_ops.c
    73  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_config()
   166  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_start()
   183  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_start()
   210  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_stop()
   221  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_stop()
   242  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_close()
   287  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_stats_get()
   310  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_stats_reset()
   338  uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_info_get()
   413  uint8_t worker_id = sched_ctx->workers[i].dev_id;  in scheduler_pmd_qp_setup()
   [all …]
|
rte_cryptodev_scheduler.c
   103  rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);  in update_scheduler_capability()
   133  rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);  in update_scheduler_feature_flag()
   153  rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);  in update_max_nb_qp()
   194  if (sched_ctx->workers[i].dev_id == worker_id) {  in rte_cryptodev_scheduler_worker_attach()
   199  worker = &sched_ctx->workers[sched_ctx->nb_workers];  in rte_cryptodev_scheduler_worker_attach()
   248  if (sched_ctx->workers[worker_pos].dev_id == worker_id)  in rte_cryptodev_scheduler_worker_detach()
   261  memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],  in rte_cryptodev_scheduler_worker_detach()
   264  memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,  in rte_cryptodev_scheduler_worker_detach()
   488  rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)  in rte_cryptodev_scheduler_workers_get() (argument)
   508  if (workers && nb_workers) {  in rte_cryptodev_scheduler_workers_get()
   [all …]
|
scheduler_failover.c
    83  struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {  in schedule_dequeue() (local)
    85  struct scheduler_worker *worker = workers[qp_ctx->deq_idx];  in schedule_dequeue()
    99  worker = workers[qp_ctx->deq_idx];  in schedule_dequeue()
   161  &sched_ctx->workers[PRIMARY_WORKER_IDX],  in scheduler_start()
   164  &sched_ctx->workers[SECONDARY_WORKER_IDX],  in scheduler_start()
|
scheduler_multicore.c
    29  struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];  (member)
   180  worker = &sched_ctx->workers[worker_idx];  in mc_scheduler_worker()
   269  memset(mc_qp_ctx->workers, 0,  in scheduler_start()
   273  mc_qp_ctx->workers[j].dev_id =  in scheduler_start()
   274  sched_ctx->workers[j].dev_id;  in scheduler_start()
   275  mc_qp_ctx->workers[j].qp_id = i;  in scheduler_start()
|
scheduler_pkt_size_distr.c
   194  struct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = {  in schedule_dequeue() (local)
   196  struct scheduler_worker *worker = workers[qp_ctx->deq_idx];  in schedule_dequeue()
   210  worker = workers[qp_ctx->deq_idx];  in schedule_dequeue()
   271  sched_ctx->workers[PRIMARY_WORKER_IDX].dev_id;  in scheduler_start()
   276  sched_ctx->workers[SECONDARY_WORKER_IDX].dev_id;  in scheduler_start()
|
rte_cryptodev_scheduler.h
   215  rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers);
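
A short usage sketch for this prototype. Consistent with the rte_cryptodev_scheduler.c hits above, the assumption here is that a non-NULL array receives the attached worker device IDs and the return value is the worker count (negative on error):

```c
#include <stdio.h>
#include <stdint.h>

#include <rte_cryptodev_scheduler.h>

/* List the crypto devices currently attached to a scheduler as workers. */
static void
print_scheduler_workers(uint8_t scheduler_id)
{
	uint8_t workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	int nb_workers, i;

	nb_workers = rte_cryptodev_scheduler_workers_get(scheduler_id, workers);
	if (nb_workers < 0) {
		printf("failed to query scheduler %u\n", scheduler_id);
		return;
	}

	for (i = 0; i < nb_workers; i++)
		printf("worker %d: crypto dev %u\n", i, workers[i]);
}
```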
|
scheduler_pmd_private.h
    38  struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];  (member)
|
scheduler_pmd.c
   257  sched_ctx->workers[i].dev_id);  in cryptodev_scheduler_remove()
|
/f-stack/dpdk/doc/guides/prog_guide/

packet_distrib_lib.rst
    19  one which sends one packet at a time to workers using 32-bits for flow_id,
    20  and an optimized mode which sends bursts of up to 8 packets at a time to workers, using 15 bits of …
    26  …core does the majority of the processing for ensuring that packets are fairly shared among workers.
    32  …The process API call will poll all the worker cache lines to see what workers are requesting packe…
    34  #. As workers request packets, the distributor takes packets from the set of packets passed in and…
    44  #. Once all input packets passed to the process API have either been distributed to workers
    65  therefore that additional packets with the same tag can safely be distributed to other workers --
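
The distributor-core side of the flow these hits describe can be sketched as below. Only `rte_distributor_process()` and `rte_distributor_flush()` are real library calls; `get_rx_burst()` is a hypothetical stand-in for the application's packet source (an rte_ring fed by the RX core, for example):

```c
#include <rte_distributor.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Hypothetical packet source, e.g. dequeuing from a ring. */
extern unsigned int get_rx_burst(struct rte_mbuf **bufs, unsigned int n);

/* Distributor-core loop: feed bursts to the distributor, which parcels
 * them out to requesting workers while keeping packets with the same
 * flow tag on the same worker. */
static void
distribute_loop(struct rte_distributor *d, volatile int *quit)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	unsigned int nb;

	while (!*quit) {
		nb = get_rx_burst(bufs, BURST_SIZE);
		if (nb > 0)
			rte_distributor_process(d, bufs, nb);
	}
	rte_distributor_flush(d); /* push out anything still queued */
}
```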
|
reorder_lib.rst
    78  multiple workers cores.
    79  The processing of packets by the workers is not guaranteed to be in order,
    83  delivering them to the workers.
    84  As the workers finish processing the packets, the distributor inserts those
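
On the egress side of the design these hits describe, the reorder library buffers out-of-order packets and drains them in sequence. A sketch under the assumption that every mbuf was stamped with a sequence number on ingress; `rte_reorder_insert()` and `rte_reorder_drain()` are the library's real entry points, while the drop policy is illustrative. The buffer itself would come from `rte_reorder_create()`, sized to the expected reordering window:

```c
#include <rte_reorder.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Push packets returned by the workers into the reorder buffer, then
 * drain whatever prefix is now in sequence and hand it to TX. */
static void
reorder_and_tx(struct rte_reorder_buffer *rob,
	       struct rte_mbuf **from_workers, unsigned int nb)
{
	struct rte_mbuf *out[BURST_SIZE];
	unsigned int i, nb_out;

	for (i = 0; i < nb; i++)
		if (rte_reorder_insert(rob, from_workers[i]) != 0)
			rte_pktmbuf_free(from_workers[i]); /* outside window: drop */

	nb_out = rte_reorder_drain(rob, out, BURST_SIZE);
	/* ... transmit the nb_out in-order packets ... */
	(void)nb_out;
}
```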
|
eventdev.rst
   170  while the 6 ports consist of 4 workers, 1 RX and 1 TX.
   271  Linking all workers to atomic queues, and the TX core to the single-link queue
   339  workers. Note that each worker will dequeue as many events as it can in a burst,
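
A typical worker loop for the pipeline these hits sketch: dequeue a burst from the worker's port, process each event, and forward it to the next-stage queue. Device, port, and queue IDs here are illustrative, and the quit flag is assumed to be set elsewhere (e.g. by a signal handler):

```c
#include <stdint.h>

#include <rte_eventdev.h>

#define BATCH 32

extern volatile int quit; /* illustrative shutdown flag */

static int
worker_loop(void *arg)
{
	const uint8_t dev_id = 0;
	const uint8_t port_id = *(uint8_t *)arg;
	struct rte_event ev[BATCH];
	uint16_t i, nb;

	while (!quit) {
		nb = rte_event_dequeue_burst(dev_id, port_id, ev, BATCH, 0);
		for (i = 0; i < nb; i++) {
			/* ... process ev[i].mbuf ... */
			ev[i].queue_id = 1; /* next stage, illustrative */
			ev[i].op = RTE_EVENT_OP_FORWARD;
		}
		if (nb > 0)
			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
	}
	return 0;
}
```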
|
rcu_lib.rst
   109  workers in the application. The writer has to wait only for the workers that
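
With the QSBR flavour of DPDK's RCU library, that division of labour looks roughly like the sketch below; variable names and the free() policy are illustrative:

```c
#include <stdlib.h>

#include <rte_rcu_qsbr.h>

/* Worker side: after each pass over the shared structure, report a
 * quiescent state so the writer can make progress. */
static void
worker_iteration(struct rte_rcu_qsbr *qs, unsigned int thread_id)
{
	/* ... read shared entries ... */
	rte_rcu_qsbr_quiescent(qs, thread_id);
}

/* Writer side: unlink the entry first, then wait for every registered
 * worker to pass through a quiescent state before freeing it. */
static void
writer_delete(struct rte_rcu_qsbr *qs, void *unlinked_entry)
{
	rte_rcu_qsbr_synchronize(qs, RTE_QSBR_THRID_INVALID);
	free(unlinked_entry); /* no worker can still hold a reference */
}
```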
|
qos_framework.rst
    49  …| 5 | Load Balancer | Distribute the input packets to the application workers. Provide un…
    50  …| | | to each worker. Preserve the affinity of traffic flows to workers a…
|
poll_mode_drv.rst
   128  enables more scaling as all workers can send the packets.
|
/f-stack/app/redis-5.0.5/deps/jemalloc/msvc/test_threads/

test_threads.cpp
    30  vector<thread> workers;  in test_threads() (local)
    40  workers.emplace_back([tid=i]() {  in test_threads()
    75  for (thread& t : workers) {  in test_threads()
|
/f-stack/dpdk/doc/guides/cryptodevs/

scheduler.rst
    19  workers, and distributes the crypto workload to them with certain behavior.
    21  mode defines certain actions for scheduling crypto ops to its workers.
    24  for attaching/detaching workers, set/get scheduling modes, and enable/disable
   106  among its workers in a round-robin manner. This mode may help to fill
   114  Packet-size based distribution mode, which works with 2 workers, the primary
   143  Fail-over mode, which works with 2 workers, the primary worker and the
   167  * corelist: Semicolon-separated list of logical cores to be used as workers.
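
Putting the attach and mode-set calls together, a round-robin scheduler over two already-initialized crypto devices might be wired up as below (device IDs are illustrative, and the scheduler vdev itself is assumed to have been created already):

```c
#include <stdint.h>

#include <rte_cryptodev_scheduler.h>

static int
setup_rr_scheduler(uint8_t scheduler_id)
{
	/* Attach two existing crypto devices as workers. */
	if (rte_cryptodev_scheduler_worker_attach(scheduler_id, 1) < 0)
		return -1;
	if (rte_cryptodev_scheduler_worker_attach(scheduler_id, 2) < 0)
		return -1;

	/* Select the round-robin distribution described above. */
	return rte_cryptodev_scheduler_mode_set(scheduler_id,
			CDEV_SCHED_MODE_ROUNDROBIN);
}
```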
|
/f-stack/dpdk/doc/guides/tools/

testeventdev.rst
    77  Set the list of cores to be used as workers.
   203  q1(atomic) are linked to all the workers.
   208  processed in parallel on the different workers, the ingress order of events
   344  Q and P is a function of the number of workers, the number of producers and
   347  The user can choose the number of workers, the number of producers and number of
   460  and P ports, where Q and P is a function of the number of workers and number of
   564  where Q and P is a function of the number of workers, the number of producers
   567  The user can choose the number of workers and number of stages through the
   679  where Q and P is a function of the number of workers, the number of producers
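
As an illustration of those knobs, an order_queue run pinning one producer core and two worker cores could be launched roughly like this (build path, vdev, and core numbers are illustrative):

```
sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_sw0 -- \
        --test=order_queue --plcores 1 --wlcores 2,3 --nb_flows 64 --nb_pkts 0
```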
|
/f-stack/doc/

F-Stack_Nginx_APP_Guide.md
    41  …awn primary worker firstly, and then wait for primary startup, continue to spawn secondary workers.
|
/f-stack/dpdk/drivers/event/dpaa2/

dpaa2_eventdev_selftest.c
   496  int (*workers)(void *), uint32_t total_events,  in launch_workers_and_wait()
   542  rte_eal_remote_launch(workers, &param[port], w_lcore);  in launch_workers_and_wait()
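
The generic shape of such a launch-and-wait helper, using only stock EAL calls; the worker body is a placeholder:

```c
#include <rte_launch.h>
#include <rte_lcore.h>

/* Placeholder worker body. */
static int
worker_fn(void *arg)
{
	(void)arg;
	/* ... per-lcore work ... */
	return 0;
}

/* Launch worker_fn on every worker lcore, then block until all return. */
static void
launch_workers_and_wait_sketch(void)
{
	unsigned int lcore_id;

	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_remote_launch(worker_fn, NULL, lcore_id);

	rte_eal_mp_wait_lcore();
}
```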
|
/f-stack/dpdk/doc/guides/sample_app_ug/

eventdev_pipeline.rst
    88  workers: 8
|
dist_app.rst
    74  from the ring and assign them to workers (using ``rte_distributor_process()`` API).
|
/f-stack/dpdk/doc/guides/eventdevs/

dlb.rst
   275  Due to this, workers should stop retrying after a time, release the events it
|
dlb2.rst
   266  Due to this, workers should stop retrying after a time, release the events it
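
One possible shape for that guidance (noted identically for dlb and dlb2 above), using only generic eventdev calls. The retry limit and the choice to release rather than keep retrying are application policy, not a DLB requirement, and any mbufs carried by the released events would still need handling (omitted here):

```c
#include <stdint.h>

#include <rte_eventdev.h>

/* Try to forward a burst; if the downstream stays full, give up and
 * release the still-held scheduling contexts so the device can make
 * progress on those flows. */
static void
forward_or_release(uint8_t dev_id, uint8_t port_id,
		   struct rte_event *ev, uint16_t nb)
{
	const int max_retries = 100; /* illustrative */
	int retries = 0;
	uint16_t sent = 0, i;

	while (sent < nb && retries++ < max_retries)
		sent += rte_event_enqueue_burst(dev_id, port_id,
						ev + sent, nb - sent);

	if (sent < nb) {
		for (i = sent; i < nb; i++)
			ev[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, ev + sent, nb - sent);
	}
}
```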
|