
Searched refs:worker (Results 1 – 25 of 53) sorted by relevance

/dpdk/doc/guides/sample_app_ug/
eventdev_pipeline.rst
44 * ``-c32``: worker dequeue depth of 32
125 worker 0 thread done. RX=0 TX=0
131 worker 2 thread done. RX=0 TX=0
134 worker 0 : 12.5 % (4979876 pkts)
135 worker 1 : 12.5 % (4970497 pkts)
136 worker 2 : 12.5 % (4986359 pkts)
137 worker 3 : 12.5 % (4970517 pkts)
138 worker 4 : 12.5 % (4966566 pkts)
139 worker 5 : 12.5 % (4963297 pkts)
140 worker 6 : 12.5 % (4953598 pkts)
packet_ordering.rst
18 * Worker (worker core) performs some light work on the packet.
22 * TX Core (worker core) receives traffic from Worker cores through software queues,
47 [--disable-reorder] [--insight-worker]
61 The insight-worker long option enables output of the packet statistics of each worker thread.
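
The RX/Worker/TX hand-off described above runs over plain software rings. The following is a minimal sketch, not the sample's code; the ring names and burst size are assumptions, and the rings would be created elsewhere with ``rte_ring_create()``:

    #include <rte_ring.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Worker stage: pull a burst from the RX ring, do the light
     * per-packet work, pass the burst on to the TX core's ring.
     * Packets that do not fit in the TX ring should be freed;
     * that handling is omitted here. */
    static int
    worker_loop(struct rte_ring *rx_to_workers, struct rte_ring *workers_to_tx)
    {
        struct rte_mbuf *burst[BURST_SIZE];
        unsigned int n;

        for (;;) {
            n = rte_ring_dequeue_burst(rx_to_workers, (void **)burst,
                    BURST_SIZE, NULL);
            if (n == 0)
                continue;
            /* ... light work on each of the n packets ... */
            rte_ring_enqueue_burst(workers_to_tx, (void **)burst, n, NULL);
        }
        return 0;
    }
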
dist_app.rst
66 worker threads (``lcore_worker()``), and a transmit thread (``lcore_tx()``).
78 tag. The distributor thread communicates with the worker threads using a
80 (one cache line) to each worker.
82 More than one worker thread can exist as part of the application, and these
83 worker threads do simple packet processing by requesting packets from
133 in the application, and also key statistics per worker, including how many
134 packets of each burst size (1-8) were sent to each worker thread.
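
The request/return protocol these lines describe is wrapped by the distributor worker API. A hedged sketch of a worker loop against the burst API in ``rte_distributor.h`` (the distributor handle and worker_id are assumed to come from initialization code):

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    /* Worker loop: hand back the previous burst and request a new one
     * in a single call; bursts are at most 8 packets, and the
     * distributor tracks the tags in flight on this worker. */
    static int
    lcore_worker_sketch(struct rte_distributor *d, unsigned int worker_id)
    {
        struct rte_mbuf *pkts[8];
        unsigned int num = 0;

        for (;;) {
            num = rte_distributor_get_pkt(d, worker_id, pkts, pkts, num);
            /* ... simple processing of pkts[0..num-1] ... */
        }
        return 0;
    }
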
timer.rst
46 executed on each worker lcore using the well-known
51 :start-after: Call lcore_mainloop() on every worker lcore. 8<
52 :end-before: >8 End of call lcore_mainloop() on every worker lcore.
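
The launch pattern those markers delimit is the standard EAL one: iterate the worker lcores and start the same entry point on each. A sketch, with ``lcore_mainloop()`` stubbed in place of the sample's timer-dispatch loop:

    #include <rte_eal.h>
    #include <rte_lcore.h>

    static int
    lcore_mainloop(void *arg)
    {
        (void)arg;
        /* timer-dispatch loop in the real sample; stubbed here */
        return 0;
    }

    /* Call lcore_mainloop() on every worker lcore, then on the main
     * lcore itself. */
    static void
    launch_all(void)
    {
        unsigned int lcore_id;

        RTE_LCORE_FOREACH_WORKER(lcore_id)
            rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

        (void)lcore_mainloop(NULL);
    }
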
qos_scheduler.rst
26 The worker thread dequeues the packets from the ring and calls the QoS scheduler enqueue/dequeue functions.
82 by the I/O RX lcores to send packets to worker lcores (the default value is 8192).
85 by worker lcores (the default value is 256)
92 worker lcore read burst size from input software rings, QoS enqueue size (the default value is 64)
186 from port 3 and a worker thread on lcore 7 writing to port 2.
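
Put together, the worker described in these lines is a dequeue-from-ring, enqueue-to-scheduler loop. A condensed sketch (classification via rte_sched_port_pkt_write() and the TX hand-off are omitted; the burst size mirrors the default noted above):

    #include <rte_ring.h>
    #include <rte_sched.h>
    #include <rte_mbuf.h>

    #define QOS_BURST 64

    /* One worker iteration: drain the input software ring into the
     * hierarchical scheduler, then pull out whatever is ready to send. */
    static void
    qos_worker_iteration(struct rte_ring *ring, struct rte_sched_port *port)
    {
        struct rte_mbuf *pkts[QOS_BURST];
        unsigned int n;

        n = rte_ring_dequeue_burst(ring, (void **)pkts, QOS_BURST, NULL);
        if (n > 0)
            rte_sched_port_enqueue(port, pkts, n);

        n = rte_sched_port_dequeue(port, pkts, QOS_BURST);
        /* ... forward the n scheduled packets towards TX ... */
    }
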
server_node_efd.rst
31 for each flow a target backend worker node is specified. The EFD table does not
37 server and worker nodes are processes running on the same platform.
70 Upon initializing, the worker node (process) creates a flow table (a regular
75 The worker node's main loop is simply receiving packets then doing a hash table
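
The lookup the worker node performs is an ordinary ``rte_hash`` probe against its local flow table. A sketch of the main-loop body (key extraction is application-specific and omitted):

    #include <rte_hash.h>
    #include <rte_mbuf.h>

    /* Drop packets whose flow is not in this node's table; process the
     * rest. A negative return from rte_hash_lookup() means "not found". */
    static void
    handle_packet(const struct rte_hash *flow_table, const void *flow_key,
            struct rte_mbuf *pkt)
    {
        int32_t pos = rte_hash_lookup(flow_table, flow_key);

        if (pos < 0) {
            rte_pktmbuf_free(pkt);
            return;
        }
        /* ... flow belongs to this node: process/forward pkt ... */
    }
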
keep_alive.rst
20 monitors the state of packet processing cores (worker cores) by
30 Note: Only the worker cores are monitored. A local (on the host) mechanism
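
On the monitored side, each worker only has to ping its keepalive slot once per loop; a failure callback registered with rte_keepalive_create() fires if a core goes quiet. A minimal sketch of the worker side:

    #include <rte_keepalive.h>

    /* Worker body under keepalive monitoring: mark this lcore alive on
     * every iteration of the processing loop. */
    static int
    monitored_worker(void *arg)
    {
        struct rte_keepalive *ka = arg;

        for (;;) {
            rte_keepalive_mark_alive(ka);
            /* ... normal packet-processing work ... */
        }
        return 0;
    }
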
/dpdk/drivers/crypto/scheduler/
scheduler_failover.c
24 failover_worker_enqueue(struct scheduler_worker *worker, in failover_worker_enqueue() argument
32 processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id, in failover_worker_enqueue()
33 worker->qp_id, ops, nb_ops); in failover_worker_enqueue()
34 worker->nb_inflight_cops += processed_ops; in failover_worker_enqueue()
88 if (worker->nb_inflight_cops) { in schedule_dequeue()
90 worker->qp_id, ops, nb_ops); in schedule_dequeue()
91 worker->nb_inflight_cops -= nb_deq_ops; in schedule_dequeue()
99 worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
101 if (worker->nb_inflight_cops) { in schedule_dequeue()
103 worker->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops); in schedule_dequeue()
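
Stripped of stats and inflight accounting, the failover enqueue above follows a simple pattern: offer the whole burst to the primary worker, then push whatever it could not take to the secondary. A condensed sketch:

    #include <rte_cryptodev.h>

    /* Primary-first enqueue; the remainder of the burst falls over to
     * the secondary device/queue pair. */
    static uint16_t
    failover_enqueue_sketch(uint8_t pri_dev, uint16_t pri_qp,
            uint8_t sec_dev, uint16_t sec_qp,
            struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        uint16_t sent = rte_cryptodev_enqueue_burst(pri_dev, pri_qp,
                ops, nb_ops);

        if (sent < nb_ops)
            sent += rte_cryptodev_enqueue_burst(sec_dev, sec_qp,
                    &ops[sent], nb_ops - sent);

        return sent;
    }
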
scheduler_roundrobin.c
25 struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx]; in schedule_enqueue() local
34 processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id, in schedule_enqueue()
35 worker->qp_id, ops, nb_ops); in schedule_enqueue()
37 worker->nb_inflight_cops += processed_ops; in schedule_enqueue()
67 struct scheduler_worker *worker; in schedule_dequeue() local
85 worker = &rr_qp_ctx->workers[last_worker_idx]; in schedule_dequeue()
87 nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id, in schedule_dequeue()
88 worker->qp_id, ops, nb_ops); in schedule_dequeue()
95 worker->nb_inflight_cops -= nb_deq_ops; in schedule_dequeue()
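
The round-robin scheduler's dispatch logic reduces to advancing a worker index per burst. A simplified sketch (the driver keeps the index in its queue-pair context; the struct here is illustrative):

    #include <rte_cryptodev.h>

    struct rr_worker { uint8_t dev_id; uint16_t qp_id; };

    /* Enqueue one burst on the worker whose turn it is, then advance. */
    static uint16_t
    rr_enqueue_sketch(struct rr_worker *workers, uint32_t nb_workers,
            uint32_t *last_idx, struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        struct rr_worker *w = &workers[*last_idx];
        uint16_t sent = rte_cryptodev_enqueue_burst(w->dev_id, w->qp_id,
                ops, nb_ops);

        *last_idx = (*last_idx + 1) % nb_workers;
        return sent;
    }
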
scheduler_pkt_size_distr.c
196 struct scheduler_worker *worker = workers[qp_ctx->deq_idx]; in schedule_dequeue() local
199 if (worker->nb_inflight_cops) { in schedule_dequeue()
200 nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id, in schedule_dequeue()
201 worker->qp_id, ops, nb_ops); in schedule_dequeue()
202 worker->nb_inflight_cops -= nb_deq_ops_pri; in schedule_dequeue()
210 worker = workers[qp_ctx->deq_idx]; in schedule_dequeue()
212 if (worker->nb_inflight_cops) { in schedule_dequeue()
213 nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id, in schedule_dequeue()
214 worker->qp_id, &ops[nb_deq_ops_pri], in schedule_dequeue()
216 worker->nb_inflight_cops -= nb_deq_ops_sec; in schedule_dequeue()
scheduler_multicore.c
157 struct scheduler_worker *worker; in mc_scheduler_worker() local
180 worker = &sched_ctx->workers[worker_idx]; in mc_scheduler_worker()
187 rte_cryptodev_enqueue_burst(worker->dev_id, in mc_scheduler_worker()
188 worker->qp_id, in mc_scheduler_worker()
199 worker->dev_id, worker->qp_id, in mc_scheduler_worker()
214 worker->dev_id, worker->qp_id, deq_ops, in mc_scheduler_worker()
rte_cryptodev_scheduler.c
167 struct scheduler_worker *worker; in rte_cryptodev_scheduler_worker_attach() local
199 worker = &sched_ctx->workers[sched_ctx->nb_workers]; in rte_cryptodev_scheduler_worker_attach()
203 worker->dev_id = worker_id; in rte_cryptodev_scheduler_worker_attach()
204 worker->driver_id = dev_info.driver_id; in rte_cryptodev_scheduler_worker_attach()
208 worker->dev_id = 0; in rte_cryptodev_scheduler_worker_attach()
209 worker->driver_id = 0; in rte_cryptodev_scheduler_worker_attach()
scheduler_pmd_ops.c
481 struct scheduler_worker *worker = &sched_ctx->workers[i]; in scheduler_pmd_sym_session_configure() local
483 ret = rte_cryptodev_sym_session_init(worker->dev_id, sess, in scheduler_pmd_sym_session_configure()
504 struct scheduler_worker *worker = &sched_ctx->workers[i]; in scheduler_pmd_sym_session_clear() local
506 rte_cryptodev_sym_session_clear(worker->dev_id, sess); in scheduler_pmd_sym_session_clear()
/dpdk/doc/guides/cryptodevs/
scheduler.rst
79 …dev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_…
84 is set and at least one worker is attached. Also, to configure the
85 scheduler at run-time, such as attach/detach worker(s), change
115 worker and the secondary worker, and distributes the enqueued crypto
119 worker.
145 crypto operation burst to the primary worker. When one or more crypto
147 worker.
154 worker cores. The enqueued bursts are distributed among the worker cores in a
162 Each worker uses its own cryptodev. Only software cryptodevs
168 The number of worker cores should be equal to the number of worker cryptodevs.
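
Run-time control of the scheduler uses the public API whose attach path appears in rte_cryptodev_scheduler.c above. A sketch of attaching a worker and selecting a mode (the device ids are assumed to come from initialization):

    #include <rte_cryptodev_scheduler.h>

    /* Attach one worker cryptodev to the scheduler, then switch the
     * scheduler into failover mode. */
    static int
    configure_scheduler(uint8_t scheduler_id, uint8_t worker_id)
    {
        int ret = rte_cryptodev_scheduler_worker_attach(scheduler_id,
                worker_id);
        if (ret < 0)
            return ret;

        return rte_cryptodev_scheduler_mode_set(scheduler_id,
                CDEV_SCHED_MODE_FAILOVER);
    }
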
/dpdk/doc/guides/prog_guide/
packet_distrib_lib.rst
11 and a set of worker lcores which are responsible for receiving the packets from the distributor and…
31 #. The worker lcores all share a single cache line with the distributor core in order to pass mess…
32 …The process API call will poll all the worker cache lines to see what workers are requesting packe…
36 and records what tags are being processed by each worker.
38 #. If the next packet in the input set has a tag which is already being processed by a worker,
39 then that packet will be queued up for processing by that worker
45 or been queued up for a worker which is processing a given tag,
58 It returns to the caller all packets which have finished processing by all worker cores.
62 If worker lcores buffer up packets internally for transmission in bulk afterwards,
91 Since it may be desirable to vary the number of worker cores, depending on the traffic load
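
On the distributor core itself, the process/return pair described above looks roughly like this (a sketch, not the library documentation's example):

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    /* Hand one input burst to the workers, then collect packets they
     * have finished with, e.g. for transmission. */
    static void
    distribute_burst(struct rte_distributor *d, struct rte_mbuf **bufs,
            unsigned int n, struct rte_mbuf **done, unsigned int max_done)
    {
        /* Assigns each packet to a worker, or queues it behind the
         * worker already processing its tag. */
        rte_distributor_process(d, bufs, n);

        int nb_done = rte_distributor_returned_pkts(d, done, max_done);
        /* ... TX the nb_done finished packets ... */
        (void)nb_done;
    }
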
eventdev.rst
154 Ports are the points of contact between worker cores and the eventdev. The
371 workers. Note that each worker will dequeue as many events as it can in a burst,
375 The worker can look up the event's source from ``event.queue_id``, which should
376 indicate to the worker what workload needs to be performed on the event.
377 Once done, the worker can update the ``event.queue_id`` to a new value, to send
405 An event-driven worker thread has the following typical workflow on the fastpath:
419 or while tearing down a worker core using an event port,
421 associated with the event port are released from the worker core,
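
That workflow condenses into a short fastpath loop: dequeue a burst, act on each event according to its queue_id, retarget it, and forward it. A sketch (the per-stage logic is elided):

    #include <rte_eventdev.h>

    #define BATCH 32

    /* Event-driven worker fastpath: burst-dequeue, process per
     * queue_id, point each event at the next stage, burst-enqueue. */
    static int
    event_worker(uint8_t dev_id, uint8_t port_id)
    {
        struct rte_event ev[BATCH];
        uint16_t i, n;

        for (;;) {
            n = rte_event_dequeue_burst(dev_id, port_id, ev, BATCH, 0);
            for (i = 0; i < n; i++) {
                /* ... workload selected by ev[i].queue_id ... */
                ev[i].queue_id++;              /* next pipeline stage */
                ev[i].op = RTE_EVENT_OP_FORWARD;
            }
            if (n > 0)
                rte_event_enqueue_burst(dev_id, port_id, ev, n);
        }
        return 0;
    }
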
/dpdk/app/test-eventdev/
test_pipeline_common.c
18 total += t->worker[i].processed_pkts; in pipeline_test_result()
22 t->worker[i].processed_pkts, in pipeline_test_result()
23 (((double)t->worker[i].processed_pkts)/total) in pipeline_test_result()
54 total += t->worker[i].processed_pkts; in processed_pkts()
61 int (*worker)(void *)) in pipeline_launch_lcores()
72 ret = rte_eal_remote_launch(worker, in pipeline_launch_lcores()
73 &t->worker[port_idx], lcore_id); in pipeline_launch_lcores()
301 struct worker_data *w = &t->worker[port]; in pipeline_event_port_setup()
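
The launch helper these lines come from pairs each worker lcore with one event port's state. A simplified sketch of that pattern (argument types reduced to void * for illustration):

    #include <rte_eal.h>
    #include <rte_lcore.h>

    /* Start 'worker' on one lcore per port, passing each instance its
     * own per-port argument (the test code passes &t->worker[i]). */
    static int
    launch_port_workers(int (*worker)(void *), void *args[],
            unsigned int nb_ports)
    {
        unsigned int lcore_id, port_idx = 0;

        RTE_LCORE_FOREACH_WORKER(lcore_id) {
            if (port_idx == nb_ports)
                break;
            if (rte_eal_remote_launch(worker, args[port_idx++], lcore_id))
                return -1;
        }
        return 0;
    }
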
test_order_common.c
263 int (*worker)(void *)) in order_launch_lcores()
274 ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], in order_launch_lcores()
350 struct worker_data *w = &t->worker[port]; in order_event_dev_port_setup()
test_order_common.h
59 struct worker_data worker[EVT_MAX_PORTS]; member
145 int (*worker)(void *));
test_perf_common.h
66 struct worker_data worker[EVT_MAX_PORTS]; member
179 int (*worker)(void *));
test_perf_common.c
21 total += t->worker[i].processed_pkts; in perf_test_result()
25 t->worker[i].processed_pkts, in perf_test_result()
26 (((double)t->worker[i].processed_pkts)/total) in perf_test_result()
432 total += t->worker[i].processed_pkts; in processed_pkts()
444 total += t->worker[i].latency; in total_latency()
452 int (*worker)(void *)) in perf_launch_lcores()
463 ret = rte_eal_remote_launch(worker, in perf_launch_lcores()
464 &t->worker[port_idx], lcore_id); in perf_launch_lcores()
744 struct worker_data *w = &t->worker[port]; in perf_event_dev_port_setup()
test_pipeline_common.h
51 struct worker_data worker[EVT_MAX_PORTS]; member
205 int (*worker)(void *));
/dpdk/drivers/event/sw/
sw_evdev_scheduler.c
102 struct rte_event_ring *worker = p->cq_worker_ring; in sw_schedule_atomic_to_cq() local
103 rte_event_ring_enqueue_burst(worker, p->cq_buf, in sw_schedule_atomic_to_cq()
344 struct rte_event_ring *worker = port->rx_worker_ring; in sw_refill_pp_buf() local
346 port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf, in sw_refill_pp_buf()
573 struct rte_event_ring *worker = port->cq_worker_ring; in sw_event_schedule() local
580 rte_event_ring_enqueue_burst(worker, in sw_event_schedule()
589 rte_event_ring_free_count(worker) - in sw_event_schedule()
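
The cq/rx worker rings used above are ``rte_event_ring`` objects, i.e. rings whose elements are struct rte_event rather than pointers. A minimal usage sketch:

    #include <rte_event_ring.h>

    /* Round-trip a burst of events through an event ring; 'space' and
     * 'avail' report free slots and remaining entries respectively. */
    static void
    event_ring_sketch(struct rte_event_ring *r, struct rte_event *evs,
            unsigned int n)
    {
        uint16_t space, avail;
        unsigned int enq, deq;

        enq = rte_event_ring_enqueue_burst(r, evs, n, &space);
        deq = rte_event_ring_dequeue_burst(r, evs, enq, &avail);
        (void)deq;
    }
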
/dpdk/app/test/
test_trace_perf.c
129 unsigned int id, worker = 0; in WORKER_DEFINE() local
134 rte_eal_remote_launch(f, &data->ldata[worker++], id); in WORKER_DEFINE()
/dpdk/lib/distributor/
rte_distributor_single.c
255 unsigned worker = __builtin_ctzl(match); in rte_distributor_process_single() local
256 if (add_to_backlog(&d->backlog[worker], in rte_distributor_process_single()
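
The __builtin_ctzl() call above turns a per-worker request bitmask into the index of the lowest-numbered waiting worker. A tiny sketch of the same trick:

    /* Index of the first set bit, i.e. the first requesting worker.
     * __builtin_ctzl() is undefined for 0, so callers must check the
     * mask first, as the distributor does. */
    static inline unsigned int
    first_requesting_worker(unsigned long match)
    {
        return (unsigned int)__builtin_ctzl(match);
    }
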
