
Searched refs:workers (Results 1 – 25 of 25) sorted by relevance

/f-stack/dpdk/app/test/
test_trace_perf.c
50 unsigned int workers; in measure_perf() local
52 for (workers = 0; workers < data->nb_workers; workers++) { in measure_perf()
53 total_cycles += data->ldata[workers].total_cycles; in measure_perf()
54 total_calls += data->ldata[workers].total_calls; in measure_perf()
68 unsigned int workers; in wait_till_workers_are_ready() local
70 for (workers = 0; workers < data->nb_workers; workers++) in wait_till_workers_are_ready()
71 while (!data->ldata[workers].started) in wait_till_workers_are_ready()
78 unsigned int workers; in signal_workers_to_finish() local
80 for (workers = 0; workers < data->nb_workers; workers++) { in signal_workers_to_finish()
81 data->ldata[workers].done = 1; in signal_workers_to_finish()
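The test_trace_perf.c matches above are the test's worker bookkeeping: the main lcore waits for every worker to raise a started flag, sums per-worker cycle and call counters, and finally sets a done flag for each worker. A minimal sketch of that handshake, using a hypothetical struct modeled on the ldata[] fields in the matches rather than the test's exact types:

    #include <stdint.h>

    /* Hypothetical per-worker state, modeled on the ldata[] fields above. */
    struct lcore_data {
        volatile int started;
        volatile int done;
        uint64_t total_cycles;
        uint64_t total_calls;
    };

    struct test_data {
        unsigned int nb_workers;
        struct lcore_data ldata[64];
    };

    static void
    wait_till_workers_are_ready(struct test_data *data)
    {
        unsigned int w;

        for (w = 0; w < data->nb_workers; w++)
            while (!data->ldata[w].started)
                ; /* spin until the worker announces itself */
    }

    static void
    signal_workers_to_finish(struct test_data *data)
    {
        unsigned int w;

        for (w = 0; w < data->nb_workers; w++)
            data->ldata[w].done = 1; /* each worker polls its own flag and exits */
    }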
/f-stack/dpdk/drivers/crypto/scheduler/
scheduler_roundrobin.c
12 struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; member
25 struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx]; in schedule_enqueue()
71 if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops in schedule_dequeue()
81 } while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops in schedule_dequeue()
85 worker = &rr_qp_ctx->workers[last_worker_idx]; in schedule_dequeue()
146 memset(rr_qp_ctx->workers, 0, in scheduler_start()
150 rr_qp_ctx->workers[j].dev_id = in scheduler_start()
151 sched_ctx->workers[j].dev_id; in scheduler_start()
152 rr_qp_ctx->workers[j].qp_id = i; in scheduler_start()
scheduler_pmd_ops.c
73 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_config()
166 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_start()
183 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_start()
210 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_stop()
221 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_stop()
242 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_close()
287 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_stats_get()
310 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_stats_reset()
338 uint8_t worker_dev_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_info_get()
413 uint8_t worker_id = sched_ctx->workers[i].dev_id; in scheduler_pmd_qp_setup()
[all …]
rte_cryptodev_scheduler.c
103 rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info); in update_scheduler_capability()
133 rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info); in update_scheduler_feature_flag()
153 rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info); in update_max_nb_qp()
194 if (sched_ctx->workers[i].dev_id == worker_id) { in rte_cryptodev_scheduler_worker_attach()
199 worker = &sched_ctx->workers[sched_ctx->nb_workers]; in rte_cryptodev_scheduler_worker_attach()
248 if (sched_ctx->workers[worker_pos].dev_id == worker_id) in rte_cryptodev_scheduler_worker_detach()
261 memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1], in rte_cryptodev_scheduler_worker_detach()
264 memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0, in rte_cryptodev_scheduler_worker_detach()
488 rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers) in rte_cryptodev_scheduler_workers_get() argument
508 if (workers && nb_workers) { in rte_cryptodev_scheduler_workers_get()
[all …]
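The attach/detach/workers_get hits above form the scheduler PMD's public worker-management API. A minimal usage sketch, assuming a scheduler vdev and two worker cryptodevs have already been probed; the device ids passed in are placeholders supplied by the caller:

    #include <stdio.h>
    #include <rte_cryptodev.h>
    #include <rte_cryptodev_scheduler.h>

    static int
    setup_scheduler_workers(uint8_t scheduler_id, uint8_t worker_a, uint8_t worker_b)
    {
        uint8_t attached[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
        int nb;

        /* Attach the two worker cryptodevs to the scheduler device. */
        if (rte_cryptodev_scheduler_worker_attach(scheduler_id, worker_a) < 0 ||
                rte_cryptodev_scheduler_worker_attach(scheduler_id, worker_b) < 0) {
            printf("cannot attach workers to scheduler %d\n", scheduler_id);
            return -1;
        }

        /* rte_cryptodev_scheduler_workers_get() (the rte_cryptodev_scheduler.h
         * hit further down) fills the array with the attached worker dev_ids
         * and returns their count. */
        nb = rte_cryptodev_scheduler_workers_get(scheduler_id, attached);
        return nb;
    }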
scheduler_failover.c
83 struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = { in schedule_dequeue() local
85 struct scheduler_worker *worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
99 worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
161 &sched_ctx->workers[PRIMARY_WORKER_IDX], in scheduler_start()
164 &sched_ctx->workers[SECONDARY_WORKER_IDX], in scheduler_start()
scheduler_multicore.c
29 struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; member
180 worker = &sched_ctx->workers[worker_idx]; in mc_scheduler_worker()
269 memset(mc_qp_ctx->workers, 0, in scheduler_start()
273 mc_qp_ctx->workers[j].dev_id = in scheduler_start()
274 sched_ctx->workers[j].dev_id; in scheduler_start()
275 mc_qp_ctx->workers[j].qp_id = i; in scheduler_start()
scheduler_pkt_size_distr.c
194 struct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = { in schedule_dequeue() local
196 struct scheduler_worker *worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
210 worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
271 sched_ctx->workers[PRIMARY_WORKER_IDX].dev_id; in scheduler_start()
276 sched_ctx->workers[SECONDARY_WORKER_IDX].dev_id; in scheduler_start()
rte_cryptodev_scheduler.h
215 rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers);
scheduler_pmd_private.h
38 struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]; member
scheduler_pmd.c
257 sched_ctx->workers[i].dev_id); in cryptodev_scheduler_remove()
/f-stack/dpdk/doc/guides/prog_guide/
packet_distrib_lib.rst
19 one which sends one packet at a time to workers using 32-bits for flow_id,
20 and an optimized mode which sends bursts of up to 8 packets at a time to workers, using 15 bits of …
26 …core does the majority of the processing for ensuring that packets are fairly shared among workers.
32 …The process API call will poll all the worker cache lines to see what workers are requesting packe…
34 #. As workers request packets, the distributor takes packets from the set of packets passed in and…
44 #. Once all input packets passed to the process API have either been distributed to workers
65 therefore that additional packets with the same tag can safely be distributed to other workers --
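The distributor guide hits describe a split between one core calling the process API and worker cores requesting packets by flow tag. A minimal sketch of both sides under that model, assuming a burst-mode distributor created elsewhere with rte_distributor_create(); names such as d, bufs and worker_id are placeholders:

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    /* Distributor-core side: poll the worker cache lines and hand out packets. */
    static void
    distribute_burst(struct rte_distributor *d, struct rte_mbuf **bufs,
            unsigned int nb_bufs)
    {
        rte_distributor_process(d, bufs, nb_bufs);
    }

    /* Worker side: return previously finished packets and request new ones. */
    static void
    worker_loop(struct rte_distributor *d, unsigned int worker_id)
    {
        struct rte_mbuf *pkts[8];
        unsigned int nb = 0;

        for (;;) {
            /* Hands back 'nb' processed packets and blocks until new ones arrive. */
            nb = rte_distributor_get_pkt(d, worker_id, pkts, pkts, nb);
            /* ... process pkts[0..nb-1] here ... */
        }
    }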
reorder_lib.rst
78 multiple workers cores.
79 The processing of packets by the workers is not guaranteed to be in order,
83 delivering them to the workers.
84 As the workers finish processing the packets, the distributor inserts those
eventdev.rst
170 while the 6 ports consist of 4 workers, 1 RX and 1 TX.
271 Linking all workers to atomic queues, and the TX core to the single-link queue
339 workers. Note that each worker will dequeue as many events as it can in a burst,
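The eventdev guide hits above describe worker ports dequeuing bursts of events from atomic queues and forwarding them to the next stage. A minimal worker-loop sketch, assuming an already configured event device; dev_id, port_id and next_queue_id are placeholders:

    #include <rte_eventdev.h>

    static void
    event_worker(uint8_t dev_id, uint8_t port_id, uint8_t next_queue_id)
    {
        struct rte_event ev[32];

        for (;;) {
            /* Dequeue as many events as are available, up to the burst size. */
            uint16_t nb = rte_event_dequeue_burst(dev_id, port_id, ev, 32, 0);
            uint16_t i;

            for (i = 0; i < nb; i++) {
                /* ... process ev[i].mbuf here ... */
                ev[i].queue_id = next_queue_id;   /* forward to the next stage */
                ev[i].op = RTE_EVENT_OP_FORWARD;
            }
            if (nb > 0)
                rte_event_enqueue_burst(dev_id, port_id, ev, nb);
        }
    }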
rcu_lib.rst
109 workers in the application. The writer has to wait only for the workers that
qos_framework.rst
49 …| 5 | Load Balancer | Distribute the input packets to the application workers. Provide un…
50 …| | | to each worker. Preserve the affinity of traffic flows to workers a…
poll_mode_drv.rst
128 enables more scaling as all workers can send the packets.
/f-stack/app/redis-5.0.5/deps/jemalloc/msvc/test_threads/
test_threads.cpp
30 vector<thread> workers; in test_threads() local
40 workers.emplace_back([tid=i]() { in test_threads()
75 for (thread& t : workers) { in test_threads()
/f-stack/dpdk/doc/guides/cryptodevs/
scheduler.rst
19 workers, and distributes the crypto workload to them with certain behavior.
21 mode defines certain actions for scheduling crypto ops to its workers.
24 for attaching/detaching workers, set/get scheduling modes, and enable/disable
106 among its workers in a round-robin manner. This mode may help to fill
114 Packet-size based distribution mode, which works with 2 workers, the primary
143 Fail-over mode, which works with 2 workers, the primary worker and the
167 * corelist: Semicolon-separated list of logical cores to be used as workers.
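The scheduler guide above enumerates the scheduling behaviours (round-robin, packet-size based distribution, fail-over, multi-core). Selecting one programmatically goes through rte_cryptodev_scheduler_mode_set(); a minimal sketch, with scheduler_id as a placeholder for the scheduler vdev's device id:

    #include <stdio.h>
    #include <rte_cryptodev_scheduler.h>

    static int
    select_round_robin(uint8_t scheduler_id)
    {
        /* Round-robin as described above; the other documented modes map to
         * CDEV_SCHED_MODE_PKT_SIZE_DISTR, CDEV_SCHED_MODE_FAILOVER and
         * CDEV_SCHED_MODE_MULTICORE. */
        if (rte_cryptodev_scheduler_mode_set(scheduler_id,
                CDEV_SCHED_MODE_ROUNDROBIN) < 0) {
            printf("failed to set scheduling mode\n");
            return -1;
        }
        return 0;
    }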
/f-stack/dpdk/doc/guides/tools/
testeventdev.rst
77 Set the list of cores to be used as workers.
203 q1(atomic) are linked to all the workers.
208 processed in parallel on the different workers, the ingress order of events
344 Q and P is a function of the number of workers, the number of producers and
347 The user can choose the number of workers, the number of producers and number of
460 and P ports, where Q and P is a function of the number of workers and number of
564 where Q and P is a function of the number of workers, the number of producers
567 The user can choose the number of workers and number of stages through the
679 where Q and P is a function of the number of workers, the number of producers
/f-stack/doc/
F-Stack_Nginx_APP_Guide.md
41 …awn primary worker firstly, and then wait for primary startup, continue to spawn secondary workers.
/f-stack/dpdk/drivers/event/dpaa2/
dpaa2_eventdev_selftest.c
496 int (*workers)(void *), uint32_t total_events, in launch_workers_and_wait()
542 rte_eal_remote_launch(workers, &param[port], w_lcore); in launch_workers_and_wait()
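The selftest hit shows a worker function pointer being launched on remote lcores. A minimal sketch of the launch-and-wait pattern behind it, assuming EAL is already initialised and using the RTE_LCORE_FOREACH_WORKER iterator; worker_fn and its argument are placeholders:

    #include <rte_launch.h>
    #include <rte_lcore.h>

    static int
    worker_fn(void *arg)
    {
        (void)arg;
        /* ... per-worker processing would go here ... */
        return 0;
    }

    static void
    launch_workers_and_wait_sketch(void *arg)
    {
        unsigned int lcore_id;

        /* Start worker_fn on every worker lcore, then block until all return. */
        RTE_LCORE_FOREACH_WORKER(lcore_id)
            rte_eal_remote_launch(worker_fn, arg, lcore_id);

        rte_eal_mp_wait_lcore();
    }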
/f-stack/dpdk/doc/guides/sample_app_ug/
eventdev_pipeline.rst
88 workers: 8
dist_app.rst
74 from the ring and assign them to workers (using ``rte_distributor_process()`` API).
/f-stack/dpdk/doc/guides/eventdevs/
dlb.rst
275 Due to this, workers should stop retrying after a time, release the events it
dlb2.rst
266 Due to this, workers should stop retrying after a time, release the events it