/f-stack/dpdk/drivers/event/dlb/

dlb_selftest.c:
     275: struct rte_event_queue_conf queue_conf;   [in test_single_link(), local]
     337: if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {   [in test_single_link()]
     349: queue_conf.event_queue_cfg = 0;   [in test_single_link()]
     471: struct rte_event_queue_conf queue_conf;   [in test_reconfiguration_link(), local]
     705: queue_conf.event_queue_cfg = 0;   [in test_reconfiguration_link()]
     798: struct rte_event_queue_conf queue_conf;   [in test_load_balanced_traffic(), local]
     922: struct rte_event_queue_conf queue_conf;   [in test_directed_traffic(), local]
    1055: struct rte_event_queue_conf queue_conf;   [in test_deferred_sched(), local]
    1122: queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;   [in test_deferred_sched()]
    1123: queue_conf.nb_atomic_order_sequences = 0;   [in test_deferred_sched()]
    [all …]

/f-stack/dpdk/drivers/event/dlb2/

dlb2_selftest.c:
     285: struct rte_event_queue_conf queue_conf;   [in test_single_link(), local]
     347: if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {   [in test_single_link()]
     359: queue_conf.event_queue_cfg = 0;   [in test_single_link()]
     494: struct rte_event_queue_conf queue_conf;   [in test_reconfiguration_link(), local]
     728: queue_conf.event_queue_cfg = 0;   [in test_reconfiguration_link()]
     821: struct rte_event_queue_conf queue_conf;   [in test_load_balanced_traffic(), local]
     945: struct rte_event_queue_conf queue_conf;   [in test_directed_traffic(), local]
    1078: struct rte_event_queue_conf queue_conf;   [in test_deferred_sched(), local]
    1145: queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;   [in test_deferred_sched()]
    1146: queue_conf.nb_atomic_order_sequences = 0;   [in test_deferred_sched()]
    [all …]

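Both DLB selftests exercise the same queue-setup sequence: take the device's default queue configuration, override event_queue_cfg, and apply it with rte_event_queue_setup(). A minimal sketch of that sequence (not the tests' verbatim code; the device/queue ids and error handling are simplified):

```c
#include <rte_eventdev.h>

/* Fetch defaults for queue 0, force a plain load-balanced queue, apply. */
static int
setup_queue0(uint8_t evdev)
{
	struct rte_event_queue_conf queue_conf;

	if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf) < 0)
		return -1;

	queue_conf.event_queue_cfg = 0;	/* as in the hits at lines 349/359 */

	return rte_event_queue_setup(evdev, 0, &queue_conf);
}
```
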
/f-stack/dpdk/lib/librte_eventdev/

rte_eventdev_trace.h:
      44: const struct rte_event_queue_conf *queue_conf),
      47: rte_trace_point_emit_u32(queue_conf->nb_atomic_flows);
      48: rte_trace_point_emit_u32(queue_conf->nb_atomic_order_sequences);
      49: rte_trace_point_emit_u32(queue_conf->event_queue_cfg);
      50: rte_trace_point_emit_u8(queue_conf->schedule_type);
      51: rte_trace_point_emit_u8(queue_conf->priority);
     126: const struct rte_event_eth_rx_adapter_queue_conf *queue_conf,
     131: rte_trace_point_emit_u32(queue_conf->rx_queue_flags);
     132: rte_trace_point_emit_u16(queue_conf->servicing_weight);
     133: rte_trace_point_emit_u8(queue_conf->ev.queue_id);
    [all …]

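These emit lines are the body of tracepoints defined with RTE_TRACE_POINT(). A sketch of how the first group plausibly fits together; the tracepoint name and the dev_id/queue_id arguments are assumptions, not copied from the header:

```c
#include <rte_trace_point.h>
#include <rte_eventdev.h>

/* Assumed tracepoint shape: args first, then one emit per recorded field. */
RTE_TRACE_POINT(
	rte_eventdev_trace_queue_setup,	/* assumed name */
	RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf),
	rte_trace_point_emit_u8(dev_id);
	rte_trace_point_emit_u8(queue_id);
	rte_trace_point_emit_u32(queue_conf->nb_atomic_flows);
	rte_trace_point_emit_u32(queue_conf->nb_atomic_order_sequences);
	rte_trace_point_emit_u32(queue_conf->event_queue_cfg);
	rte_trace_point_emit_u8(queue_conf->schedule_type);
	rte_trace_point_emit_u8(queue_conf->priority);
)
```
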
rte_eventdev.c:
     594: if (queue_conf == NULL)   [in rte_event_queue_default_conf_get()]
     611: if (queue_conf &&   [in is_valid_atomic_queue_conf()]
     612: !(queue_conf->event_queue_cfg &   [in is_valid_atomic_queue_conf()]
     614: ((queue_conf->event_queue_cfg &   [in is_valid_atomic_queue_conf()]
     616: (queue_conf->schedule_type   [in is_valid_atomic_queue_conf()]
     627: if (queue_conf &&   [in is_valid_ordered_queue_conf()]
     628: !(queue_conf->event_queue_cfg &   [in is_valid_ordered_queue_conf()]
     630: ((queue_conf->event_queue_cfg &   [in is_valid_ordered_queue_conf()]
     632: (queue_conf->schedule_type   [in is_valid_ordered_queue_conf()]
     690: if (queue_conf == NULL) {   [in rte_event_queue_setup()]
    [all …]

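The is_valid_atomic_queue_conf() / is_valid_ordered_queue_conf() hits are truncated mid-expression. Reconstructed as a sketch, the atomic check has this shape (the ordered variant is the same with RTE_SCHED_TYPE_ORDERED):

```c
/* A queue conf can host atomic events when it is not single-link and
 * either accepts all schedule types or is explicitly atomic. */
static int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	return queue_conf != NULL &&
		!(queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		 queue_conf->schedule_type == RTE_SCHED_TYPE_ATOMIC);
}
```
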
rte_eventdev_pmd.h:
     222: uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
     239: const struct rte_event_queue_conf *queue_conf);
     530: const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

rte_event_eth_rx_adapter.c:
    1761: if (queue_conf->servicing_weight == 0) {   [in rxa_sw_add()]
    1764: temp_conf = *queue_conf;   [in rxa_sw_add()]
    1769: queue_conf = &temp_conf;   [in rxa_sw_add()]
    1774: wt = queue_conf->servicing_weight;   [in rxa_sw_add()]
    1789: queue_conf->servicing_weight,   [in rxa_sw_add()]
    1849: rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);   [in rxa_sw_add()]
    2062: if ((rx_adapter == NULL) || (queue_conf == NULL))   [in rte_event_eth_rx_adapter_queue_add()]
    2076: && (queue_conf->rx_queue_flags &   [in rte_event_eth_rx_adapter_queue_add()]
    2116: rx_queue_id, queue_conf);   [in rte_event_eth_rx_adapter_queue_add()]
    2131: queue_conf);   [in rte_event_eth_rx_adapter_queue_add()]
    [all …]

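From the application side, rxa_sw_add() is reached through rte_event_eth_rx_adapter_queue_add(). A minimal sketch of a caller (the ids are placeholders; passing rx_queue_id = -1 instead adds every Rx queue of the port, as the eventdev_pipeline examples further down do):

```c
#include <string.h>
#include <rte_event_eth_rx_adapter.h>

static int
add_one_rx_queue(uint8_t adapter_id, uint16_t eth_port, uint8_t ev_qid)
{
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.queue_id = ev_qid;
	queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	/* A weight of 0 makes rxa_sw_add() substitute a default (see the
	 * hit at line 1761); set it explicitly to join WRR polling. */
	queue_conf.servicing_weight = 1;

	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port,
						  0 /* rx queue id */,
						  &queue_conf);
}
```
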
/f-stack/dpdk/drivers/event/skeleton/

skeleton_eventdev.c:
     160: struct rte_event_queue_conf *queue_conf)   [in skeleton_eventdev_queue_def_conf(), argument]
     169: queue_conf->nb_atomic_flows = (1ULL << 20);   [in skeleton_eventdev_queue_def_conf()]
     170: queue_conf->nb_atomic_order_sequences = (1ULL << 20);   [in skeleton_eventdev_queue_def_conf()]
     171: queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;   [in skeleton_eventdev_queue_def_conf()]
     172: queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;   [in skeleton_eventdev_queue_def_conf()]
     186: const struct rte_event_queue_conf *queue_conf)   [in skeleton_eventdev_queue_setup(), argument]
     193: RTE_SET_USED(queue_conf);   [in skeleton_eventdev_queue_setup()]

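The skeleton driver shows the queue_def_conf callback pattern that the octeontx, dpaa and dpaa2 entries below repeat: ignore the ids and fill conservative defaults. A sketch of that shape (not any driver's verbatim code):

```c
#include <rte_common.h>
#include <rte_eventdev.h>

static void
example_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
		       struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	/* Defaults mirrored from the hits above. */
	queue_conf->nb_atomic_flows = 1ULL << 20;
	queue_conf->nb_atomic_order_sequences = 1ULL << 20;
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
```
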
/f-stack/dpdk/drivers/event/octeontx/

ssovf_evdev.c:
     184: struct rte_event_queue_conf *queue_conf)   [in ssovf_queue_def_conf(), argument]
     189: queue_conf->nb_atomic_flows = (1ULL << 20);   [in ssovf_queue_def_conf()]
     190: queue_conf->nb_atomic_order_sequences = (1ULL << 20);   [in ssovf_queue_def_conf()]
     191: queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;   [in ssovf_queue_def_conf()]
     192: queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;   [in ssovf_queue_def_conf()]
     204: const struct rte_event_queue_conf *queue_conf)   [in ssovf_queue_setup(), argument]
     401: if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)   [in ssovf_eth_rx_adapter_queue_add()]
     471: pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;   [in ssovf_eth_rx_adapter_queue_add()]
     473: pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;   [in ssovf_eth_rx_adapter_queue_add()]
     474: pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;   [in ssovf_eth_rx_adapter_queue_add()]
    [all …]

/f-stack/dpdk/drivers/event/dpaa2/

dpaa2_eventdev.c:
     479: struct rte_event_queue_conf *queue_conf)   [in dpaa2_eventdev_queue_def_conf(), argument]
     487: queue_conf->nb_atomic_order_sequences =   [in dpaa2_eventdev_queue_def_conf()]
     489: queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;   [in dpaa2_eventdev_queue_def_conf()]
     490: queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;   [in dpaa2_eventdev_queue_def_conf()]
     495: const struct rte_event_queue_conf *queue_conf)   [in dpaa2_eventdev_queue_setup(), argument]
     502: switch (queue_conf->schedule_type) {   [in dpaa2_eventdev_queue_setup()]
     698: uint8_t ev_qid = queue_conf->ev.queue_id;   [in dpaa2_eventdev_eth_queue_add_all()]
     706: dpcon, queue_conf);   [in dpaa2_eventdev_eth_queue_add_all()]
     728: uint8_t ev_qid = queue_conf->ev.queue_id;   [in dpaa2_eventdev_eth_queue_add()]
     736: eth_dev, queue_conf);   [in dpaa2_eventdev_eth_queue_add()]
    [all …]

dpaa2_eventdev_selftest.c:
     165: struct rte_event_queue_conf queue_conf;   [in _eventdev_setup(), local]
     168: &queue_conf);   [in _eventdev_setup()]
     171: queue_conf.priority = i * step;   [in _eventdev_setup()]
     172: ret = rte_event_queue_setup(evdev, i, &queue_conf);   [in _eventdev_setup()]

/f-stack/dpdk/drivers/event/dpaa/

dpaa_eventdev.c:
     467: struct rte_event_queue_conf *queue_conf)   [in dpaa_event_queue_def_conf(), argument]
     474: memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));   [in dpaa_event_queue_def_conf()]
     475: queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;   [in dpaa_event_queue_def_conf()]
     476: queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;   [in dpaa_event_queue_def_conf()]
     477: queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;   [in dpaa_event_queue_def_conf()]
     482: const struct rte_event_queue_conf *queue_conf)   [in dpaa_event_queue_setup(), argument]
     489: switch (queue_conf->schedule_type) {   [in dpaa_event_queue_setup()]
     497: evq_info->event_queue_cfg = queue_conf->event_queue_cfg;   [in dpaa_event_queue_setup()]
     637: const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)   [in dpaa_event_eth_rx_adapter_queue_add(), argument]
     640: uint8_t ev_qid = queue_conf->ev.queue_id;   [in dpaa_event_eth_rx_adapter_queue_add()]
    [all …]

/f-stack/dpdk/examples/ip_pipeline/

cryptodev.c:
      54: struct rte_cryptodev_qp_conf queue_conf;   [in cryptodev_create(), local]
     102: queue_conf.nb_descriptors = params->queue_size;   [in cryptodev_create()]
     105: &queue_conf, socket_id);   [in cryptodev_create()]

/f-stack/dpdk/drivers/net/softnic/

rte_eth_softnic_cryptodev.c:
      62: struct rte_cryptodev_qp_conf queue_conf;   [in softnic_cryptodev_create(), local]
     111: queue_conf.nb_descriptors = params->queue_size;   [in softnic_cryptodev_create()]
     114: &queue_conf, socket_id);   [in softnic_cryptodev_create()]

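Both cryptodev call sites (ip_pipeline above and softnic here) fill a struct rte_cryptodev_qp_conf and hand it to rte_cryptodev_queue_pair_setup(). A sketch; in this DPDK version the qp conf also carries session mempools, which the truncated hits do not show, so those fields are an assumption:

```c
#include <rte_cryptodev.h>

static int
setup_qp(uint8_t dev_id, uint16_t qp_id, uint32_t queue_size, int socket_id,
	 struct rte_mempool *sess_mp, struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_qp_conf queue_conf;

	queue_conf.nb_descriptors = queue_size;
	queue_conf.mp_session = sess_mp;		/* assumed fields */
	queue_conf.mp_session_private = sess_priv_mp;

	return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &queue_conf,
					      socket_id);
}
```
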
/f-stack/dpdk/drivers/baseband/null/

bbdev_null.c:
     103: const struct rte_bbdev_queue_conf *queue_conf)   [in q_setup(), argument]
     112: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     118: q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,   [in q_setup()]
     119: queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);   [in q_setup()]

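q_setup() backs each bbdev queue with an rte_ring sized and placed according to queue_conf. A sketch of that allocation (the ring name format is ours; without RING_F_EXACT_SZ the size should be a power of two):

```c
#include <stdio.h>
#include <rte_ring.h>
#include <rte_bbdev.h>

static struct rte_ring *
make_queue_ring(uint16_t q_id, const struct rte_bbdev_queue_conf *queue_conf)
{
	char ring_name[RTE_RING_NAMESIZE];

	snprintf(ring_name, sizeof(ring_name), "bbdev_q%u", q_id);

	/* Single-producer/single-consumer ring on the queue's NUMA socket. */
	return rte_ring_create(ring_name, queue_conf->queue_size,
			       queue_conf->socket,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}
```
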
/f-stack/dpdk/lib/librte_rawdev/

rte_rawdev.h:
     161: rte_rawdev_obj_t queue_conf,
     187: rte_rawdev_obj_t queue_conf,

rte_rawdev_pmd.h:
     223: rte_rawdev_obj_t queue_conf,
     241: rte_rawdev_obj_t queue_conf,

rte_rawdev.c:
     140: rte_rawdev_obj_t queue_conf,   [in rte_rawdev_queue_conf_get(), argument]
     149: return (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf,   [in rte_rawdev_queue_conf_get()]
     156: rte_rawdev_obj_t queue_conf,   [in rte_rawdev_queue_setup(), argument]
     165: return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf,   [in rte_rawdev_queue_setup()]

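rte_rawdev_obj_t is an opaque void *, so every raw driver defines its own conf struct and the caller passes its size alongside (visible as the extra argument forwarded to dev_ops above). A sketch with a hypothetical conf struct; a real application must use whatever struct its driver documents, e.g. struct ntb_queue_conf for the ntb driver listed below:

```c
#include <rte_rawdev.h>

struct my_raw_queue_conf {	/* hypothetical, for illustration only */
	unsigned int depth;
};

static int
setup_raw_queue(uint16_t dev_id, uint16_t queue_id)
{
	struct my_raw_queue_conf queue_conf = { .depth = 256 };

	/* The size argument lets the driver sanity-check what it was handed. */
	return rte_rawdev_queue_setup(dev_id, queue_id, &queue_conf,
				      sizeof(queue_conf));
}
```
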
/f-stack/dpdk/app/test-eventdev/

test_pipeline_common.c:
     314: struct rte_event_eth_rx_adapter_queue_conf queue_conf;   [in pipeline_event_rx_adapter_setup(), local]
     316: memset(&queue_conf, 0,   [in pipeline_event_rx_adapter_setup()]
     318: queue_conf.ev.sched_type = opt->sched_type_list[0];   [in pipeline_event_rx_adapter_setup()]
     330: queue_conf.ev.queue_id = prod * stride;   [in pipeline_event_rx_adapter_setup()]
     338: &queue_conf);   [in pipeline_event_rx_adapter_setup()]

/f-stack/dpdk/drivers/raw/skeleton/

skeleton_rawdev.c:
     227: rte_rawdev_obj_t queue_conf,   [in skeleton_rawdev_queue_def_conf(), argument]
     235: if (!dev || !queue_conf ||   [in skeleton_rawdev_queue_def_conf()]
     243: rte_memcpy(queue_conf, skelq,   [in skeleton_rawdev_queue_def_conf()]
     261: rte_rawdev_obj_t queue_conf,   [in skeleton_rawdev_queue_setup(), argument]
     270: if (!dev || !queue_conf ||   [in skeleton_rawdev_queue_setup()]
     279: rte_memcpy(q, queue_conf,   [in skeleton_rawdev_queue_setup()]

/f-stack/dpdk/examples/eventdev_pipeline/

pipeline_worker_generic.c:
     447: struct rte_event_eth_rx_adapter_queue_conf queue_conf;   [in init_adapters(), local]
     448: memset(&queue_conf, 0, sizeof(queue_conf));   [in init_adapters()]
     449: queue_conf.ev.sched_type = cdata.queue_type;   [in init_adapters()]
     450: queue_conf.ev.queue_id = cdata.qid[0];   [in init_adapters()]
     454: -1, &queue_conf);   [in init_adapters()]

pipeline_worker_tx.c:
     762: struct rte_event_eth_rx_adapter_queue_conf queue_conf;   [in init_adapters(), local]
     763: memset(&queue_conf, 0, sizeof(queue_conf));   [in init_adapters()]
     764: queue_conf.ev.sched_type = cdata.queue_type;   [in init_adapters()]
     782: queue_conf.ev.queue_id = cdata.rx_stride ?   [in init_adapters()]
     786: ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);   [in init_adapters()]

/f-stack/dpdk/drivers/event/octeontx2/

otx2_evdev_adptr.c:
     313: const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)   [in otx2_sso_rx_adapter_queue_add(), argument]
     332: queue_conf->ev.sched_type,   [in otx2_sso_rx_adapter_queue_add()]
     333: queue_conf->ev.queue_id, port);   [in otx2_sso_rx_adapter_queue_add()]
     343: queue_conf->ev.sched_type,   [in otx2_sso_rx_adapter_queue_add()]
     344: queue_conf->ev.queue_id, port);   [in otx2_sso_rx_adapter_queue_add()]
     350: queue_conf->ev.queue_id);   [in otx2_sso_rx_adapter_queue_add()]

/f-stack/dpdk/app/test/

test_event_crypto_adapter.c:
     611: struct rte_event_queue_conf queue_conf;   [in configure_eventdev(), local]
     649: queue_conf.nb_atomic_flows = info.max_event_queue_flows;   [in configure_eventdev()]
     650: queue_conf.nb_atomic_order_sequences = 32;   [in configure_eventdev()]
     651: queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;   [in configure_eventdev()]
     652: queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;   [in configure_eventdev()]
     653: queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;   [in configure_eventdev()]
     656: ret = rte_event_queue_setup(evdev, qid, &queue_conf);   [in configure_eventdev()]

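configure_eventdev() sizes the queue from the device's advertised limits before setting it up. A sketch of that capability-driven pattern (not the test's verbatim code; error handling is omitted):

```c
#include <rte_eventdev.h>

static int
setup_single_link_atomic_queue(uint8_t evdev, uint8_t qid)
{
	struct rte_event_dev_info info;
	struct rte_event_queue_conf queue_conf;

	rte_event_dev_info_get(evdev, &info);

	/* Stay within what the device reports it can handle. */
	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
	queue_conf.nb_atomic_order_sequences = 32;
	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	return rte_event_queue_setup(evdev, qid, &queue_conf);
}
```
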
/f-stack/dpdk/drivers/baseband/turbo_sw/

bbdev_turbo_software.c:
     285: const struct rte_bbdev_queue_conf *queue_conf)   [in q_setup(), argument]
     293: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     312: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     333: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     353: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     373: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     394: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     415: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     436: RTE_CACHE_LINE_SIZE, queue_conf->socket);   [in q_setup()]
     455: queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);   [in q_setup()]
    [all …]

/f-stack/dpdk/drivers/raw/ntb/

ntb.c:
     256: rte_rawdev_obj_t queue_conf,   [in ntb_queue_conf_get(), argument]
     259: struct ntb_queue_conf *q_conf = queue_conf;   [in ntb_queue_conf_get()]
     307: rte_rawdev_obj_t queue_conf,   [in ntb_rxq_setup(), argument]
     310: struct ntb_queue_conf *rxq_conf = queue_conf;   [in ntb_rxq_setup()]
     392: rte_rawdev_obj_t queue_conf,   [in ntb_txq_setup(), argument]
     395: struct ntb_queue_conf *txq_conf = queue_conf;   [in ntb_txq_setup()]
     460: rte_rawdev_obj_t queue_conf,   [in ntb_queue_setup(), argument]
     469: ret = ntb_txq_setup(dev, queue_id, queue_conf, conf_size);   [in ntb_queue_setup()]
     473: ret = ntb_rxq_setup(dev, queue_id, queue_conf, conf_size);   [in ntb_queue_setup()]

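The ntb driver shows the PMD side of the opaque rawdev conf: each queue_setup/conf_get op casts rte_rawdev_obj_t back to the driver's own type. A sketch with a hypothetical conf struct, matching the op prototypes from rte_rawdev_pmd.h listed earlier:

```c
#include <errno.h>
#include <rte_common.h>
#include <rte_rawdev_pmd.h>

struct my_raw_queue_conf {	/* hypothetical, for illustration only */
	unsigned int depth;
};

static int
example_queue_setup(struct rte_rawdev *dev, uint16_t queue_id,
		    rte_rawdev_obj_t queue_conf, size_t conf_size)
{
	const struct my_raw_queue_conf *conf = queue_conf;

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	/* Reject a conf of the wrong size before trusting its fields. */
	if (conf == NULL || conf_size != sizeof(*conf))
		return -EINVAL;

	/* ... allocate conf->depth descriptors here ... */
	return 0;
}
```
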