| /dpdk/drivers/event/dlb2/ |
| dlb2_selftest.c |
|   285   struct rte_event_queue_conf queue_conf;  in test_single_link() local
|   347   if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {  in test_single_link()
|   359   queue_conf.event_queue_cfg = 0;  in test_single_link()
|   494   struct rte_event_queue_conf queue_conf;  in test_reconfiguration_link() local
|   728   queue_conf.event_queue_cfg = 0;  in test_reconfiguration_link()
|   821   struct rte_event_queue_conf queue_conf;  in test_load_balanced_traffic() local
|   945   struct rte_event_queue_conf queue_conf;  in test_directed_traffic() local
|  1078   struct rte_event_queue_conf queue_conf;  in test_deferred_sched() local
|  1145   queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;  in test_deferred_sched()
|  1146   queue_conf.nb_atomic_order_sequences = 0;  in test_deferred_sched()
|   [all …]
|
| /dpdk/lib/eventdev/ |
| eventdev_trace.h |
|    44   const struct rte_event_queue_conf *queue_conf),
|    47   rte_trace_point_emit_u32(queue_conf->nb_atomic_flows);
|    48   rte_trace_point_emit_u32(queue_conf->nb_atomic_order_sequences);
|    49   rte_trace_point_emit_u32(queue_conf->event_queue_cfg);
|    50   rte_trace_point_emit_u8(queue_conf->schedule_type);
|    51   rte_trace_point_emit_u8(queue_conf->priority);
|   126   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf,
|   131   rte_trace_point_emit_u32(queue_conf->rx_queue_flags);
|   132   rte_trace_point_emit_u16(queue_conf->servicing_weight);
|   133   rte_trace_point_emit_u8(queue_conf->ev.queue_id);
|   [all …]
|
| rte_eventdev.c |
|   504   if (queue_conf == NULL)  in rte_event_queue_default_conf_get()
|   521   if (queue_conf &&  in is_valid_atomic_queue_conf()
|   522   !(queue_conf->event_queue_cfg &  in is_valid_atomic_queue_conf()
|   524   ((queue_conf->event_queue_cfg &  in is_valid_atomic_queue_conf()
|   526   (queue_conf->schedule_type  in is_valid_atomic_queue_conf()
|   537   if (queue_conf &&  in is_valid_ordered_queue_conf()
|   538   !(queue_conf->event_queue_cfg &  in is_valid_ordered_queue_conf()
|   540   ((queue_conf->event_queue_cfg &  in is_valid_ordered_queue_conf()
|   542   (queue_conf->schedule_type  in is_valid_ordered_queue_conf()
|   600   if (queue_conf == NULL) {  in rte_event_queue_setup()
|   [all …]
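
The rte_eventdev.c hits above are the library-level validation (is_valid_atomic_queue_conf() and friends) that runs before a queue configuration reaches the PMD. A minimal application-side sketch of the sequence those checks guard, assuming an already-configured event device; dev_id and queue_id are placeholders:

    #include <rte_eventdev.h>

    /* Sketch only: start from the PMD defaults, override the fields of
     * interest, then let rte_event_queue_setup() run the validation shown
     * above (is_valid_atomic_queue_conf() etc.). */
    static int
    setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
    {
            struct rte_event_queue_conf qconf;

            if (rte_event_queue_default_conf_get(dev_id, queue_id, &qconf) < 0)
                    return -1;

            qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
            qconf.nb_atomic_flows = 1024;
            qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

            return rte_event_queue_setup(dev_id, queue_id, &qconf);
    }
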
|
| rte_event_eth_rx_adapter.c |
|  2155   temp_conf = *queue_conf;  in rxa_sw_add()
|  2160   queue_conf = &temp_conf;  in rxa_sw_add()
|  2190   queue_conf->servicing_weight,  in rxa_sw_add()
|  2672   rx_queue_id, queue_conf);  in rte_event_eth_rx_adapter_queue_add()
|  2687   queue_conf);  in rte_event_eth_rx_adapter_queue_add()
|  2695   rx_queue_id, queue_conf, ret);  in rte_event_eth_rx_adapter_queue_add()
|  3239   if (queue_conf == NULL) {  in rte_event_eth_rx_adapter_queue_conf_get()
|  3257   memset(queue_conf, 0, sizeof(*queue_conf));  in rte_event_eth_rx_adapter_queue_conf_get()
|  3258   queue_conf->rx_queue_flags = 0;  in rte_event_eth_rx_adapter_queue_conf_get()
|  3260   queue_conf->rx_queue_flags |=  in rte_event_eth_rx_adapter_queue_conf_get()
|   [all …]
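
rte_event_eth_rx_adapter_queue_add() takes a struct rte_event_eth_rx_adapter_queue_conf that tells the adapter which event queue to inject packets into and how. A hedged sketch of a typical caller; the adapter, port and event queue ids are placeholders:

    #include <string.h>
    #include <rte_event_eth_rx_adapter.h>

    /* Sketch: attach all Rx queues of one ethdev port to an event queue. */
    static int
    add_rx_queues(uint8_t adapter_id, uint16_t eth_port, uint8_t ev_queue)
    {
            struct rte_event_eth_rx_adapter_queue_conf qconf;

            memset(&qconf, 0, sizeof(qconf));
            qconf.ev.queue_id = ev_queue;                /* destination event queue */
            qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
            qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
            qconf.servicing_weight = 1;                  /* used by the SW service path */

            /* rx_queue_id of -1 applies the configuration to every Rx queue. */
            return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1, &qconf);
    }
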
|
| eventdev_pmd.h |
|   313   uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
|   330   const struct rte_event_queue_conf *queue_conf);
|   686   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
|   736   struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
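
eventdev_pmd.h declares the driver-side hooks that the driver listings below (skeleton, dpaa2, octeontx, dpaa, cnxk) implement. A hedged sketch of the queue_def_conf callback shape, modelled on the skeleton driver's defaults; the function name is illustrative and the header is internal to drivers:

    #include <rte_eventdev.h>
    #include <eventdev_pmd.h>   /* driver-internal header */

    /* Illustrative PMD callback: report permissive per-queue defaults. */
    static void
    my_evdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                            struct rte_event_queue_conf *queue_conf)
    {
            RTE_SET_USED(dev);
            RTE_SET_USED(queue_id);

            queue_conf->nb_atomic_flows = (1ULL << 20);
            queue_conf->nb_atomic_order_sequences = (1ULL << 20);
            queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
            queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
    }
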
|
| /dpdk/lib/power/ |
| rte_power_pmd_mgmt.c |
|   250   struct queue_list_entry *queue_conf = arg;  in clb_multiwait() local
|   259   queue_reset(lcore_conf, queue_conf);  in clb_multiwait()
|   265   if (!queue_can_sleep(lcore_conf, queue_conf))  in clb_multiwait()
|   288   struct queue_list_entry *queue_conf = arg;  in clb_umwait() local
|   292   queue_conf->n_empty_polls++;  in clb_umwait()
|   304   queue_conf->n_empty_polls = 0;  in clb_umwait()
|   315   struct queue_list_entry *queue_conf = arg;  in clb_pause() local
|   323   queue_reset(lcore_conf, queue_conf);  in clb_pause()
|   326   if (!queue_can_sleep(lcore_conf, queue_conf))  in clb_pause()
|   357   struct queue_list_entry *queue_conf = arg;  in clb_scale_freq() local
|   [all …]
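
Here queue_conf is not an eventdev structure but the library's internal per-queue state (struct queue_list_entry) used by the Rx callbacks clb_umwait()/clb_pause()/clb_scale_freq(). Those callbacks are installed through the public API; a hedged sketch, with port and queue ids as placeholders:

    #include <rte_lcore.h>
    #include <rte_power_pmd_mgmt.h>

    /* Sketch: let the current lcore pause (or monitor/scale, depending on the
     * chosen mode) when port 0 / Rx queue 0 stays empty; ids are placeholders. */
    static int
    enable_rx_power_mgmt(void)
    {
            return rte_power_ethdev_pmgmt_queue_enable(rte_lcore_id(), 0, 0,
                            RTE_POWER_MGMT_TYPE_PAUSE);
    }
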
|
| /dpdk/drivers/event/skeleton/ |
| skeleton_eventdev.c |
|   161   struct rte_event_queue_conf *queue_conf)  in skeleton_eventdev_queue_def_conf() argument
|   170   queue_conf->nb_atomic_flows = (1ULL << 20);  in skeleton_eventdev_queue_def_conf()
|   171   queue_conf->nb_atomic_order_sequences = (1ULL << 20);  in skeleton_eventdev_queue_def_conf()
|   172   queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;  in skeleton_eventdev_queue_def_conf()
|   173   queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;  in skeleton_eventdev_queue_def_conf()
|   187   const struct rte_event_queue_conf *queue_conf)  in skeleton_eventdev_queue_setup() argument
|   194   RTE_SET_USED(queue_conf);  in skeleton_eventdev_queue_setup()
|
| /dpdk/drivers/event/dpaa2/ |
| dpaa2_eventdev.c |
|   480   struct rte_event_queue_conf *queue_conf)  in dpaa2_eventdev_queue_def_conf() argument
|   488   queue_conf->nb_atomic_order_sequences =  in dpaa2_eventdev_queue_def_conf()
|   490   queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;  in dpaa2_eventdev_queue_def_conf()
|   491   queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;  in dpaa2_eventdev_queue_def_conf()
|   496   const struct rte_event_queue_conf *queue_conf)  in dpaa2_eventdev_queue_setup() argument
|   503   switch (queue_conf->schedule_type) {  in dpaa2_eventdev_queue_setup()
|   699   uint8_t ev_qid = queue_conf->ev.queue_id;  in dpaa2_eventdev_eth_queue_add_all()
|   707   dpcon, queue_conf);  in dpaa2_eventdev_eth_queue_add_all()
|   729   uint8_t ev_qid = queue_conf->ev.queue_id;  in dpaa2_eventdev_eth_queue_add()
|   737   eth_dev, queue_conf);  in dpaa2_eventdev_eth_queue_add()
|   [all …]
|
| /dpdk/drivers/event/octeontx/ |
| ssovf_evdev.c |
|   187   struct rte_event_queue_conf *queue_conf)  in ssovf_queue_def_conf() argument
|   192   queue_conf->nb_atomic_flows = (1ULL << 20);  in ssovf_queue_def_conf()
|   193   queue_conf->nb_atomic_order_sequences = (1ULL << 20);  in ssovf_queue_def_conf()
|   194   queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;  in ssovf_queue_def_conf()
|   195   queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;  in ssovf_queue_def_conf()
|   207   const struct rte_event_queue_conf *queue_conf)  in ssovf_queue_setup() argument
|   404   if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)  in ssovf_eth_rx_adapter_queue_add()
|   474   pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;  in ssovf_eth_rx_adapter_queue_add()
|   476   pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;  in ssovf_eth_rx_adapter_queue_add()
|   477   pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;  in ssovf_eth_rx_adapter_queue_add()
|   [all …]
|
| /dpdk/drivers/event/dpaa/ |
| dpaa_eventdev.c |
|   468   struct rte_event_queue_conf *queue_conf)  in dpaa_event_queue_def_conf() argument
|   475   memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));  in dpaa_event_queue_def_conf()
|   476   queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;  in dpaa_event_queue_def_conf()
|   477   queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;  in dpaa_event_queue_def_conf()
|   478   queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;  in dpaa_event_queue_def_conf()
|   483   const struct rte_event_queue_conf *queue_conf)  in dpaa_event_queue_setup() argument
|   490   switch (queue_conf->schedule_type) {  in dpaa_event_queue_setup()
|   498   evq_info->event_queue_cfg = queue_conf->event_queue_cfg;  in dpaa_event_queue_setup()
|   638   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)  in dpaa_event_eth_rx_adapter_queue_add() argument
|   641   uint8_t ev_qid = queue_conf->ev.queue_id;  in dpaa_event_eth_rx_adapter_queue_add()
|   [all …]
|
| /dpdk/drivers/event/cnxk/ |
| cnxk_eventdev_adptr.c |
|   211   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)  in cnxk_sso_rx_adapter_queue_add() argument
|   222   i, queue_conf);  in cnxk_sso_rx_adapter_queue_add()
|   231   &queue_conf->ev,  in cnxk_sso_rx_adapter_queue_add()
|   232   !!(queue_conf->rx_queue_flags &  in cnxk_sso_rx_adapter_queue_add()
|   234   if (queue_conf->rx_queue_flags &  in cnxk_sso_rx_adapter_queue_add()
|   236   cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,  in cnxk_sso_rx_adapter_queue_add()
|   242   queue_conf->vector_sz,  in cnxk_sso_rx_adapter_queue_add()
|   243   queue_conf->vector_timeout_ns,  in cnxk_sso_rx_adapter_queue_add()
|   244   queue_conf->vector_mp);  in cnxk_sso_rx_adapter_queue_add()
|   258   queue_conf->ev.queue_id);  in cnxk_sso_rx_adapter_queue_add()
|
| cnxk_eventdev.c |
|   288   struct rte_event_queue_conf *queue_conf)  in cnxk_sso_queue_def_conf() argument
|   293   queue_conf->nb_atomic_flows = (1ULL << 20);  in cnxk_sso_queue_def_conf()
|   294   queue_conf->nb_atomic_order_sequences = (1ULL << 20);  in cnxk_sso_queue_def_conf()
|   295   queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;  in cnxk_sso_queue_def_conf()
|   296   queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;  in cnxk_sso_queue_def_conf()
|   301   const struct rte_event_queue_conf *queue_conf)  in cnxk_sso_queue_setup()
|   310   priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,  in cnxk_sso_queue_setup()
|
| cnxk_eventdev.h |
|   248   struct rte_event_queue_conf *queue_conf);
|   250   const struct rte_event_queue_conf *queue_conf);
|   303   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
|
| /dpdk/app/test-eventdev/ |
| test_pipeline_common.c |
|   333   struct rte_event_eth_rx_adapter_queue_conf queue_conf;  in pipeline_event_rx_adapter_setup() local
|   335   memset(&queue_conf, 0,  in pipeline_event_rx_adapter_setup()
|   337   queue_conf.ev.sched_type = opt->sched_type_list[0];  in pipeline_event_rx_adapter_setup()
|   399   queue_conf.vector_sz = opt->vector_size;  in pipeline_event_rx_adapter_setup()
|   400   queue_conf.vector_timeout_ns =  in pipeline_event_rx_adapter_setup()
|   402   queue_conf.rx_queue_flags |=  in pipeline_event_rx_adapter_setup()
|   404   queue_conf.vector_mp = vector_pool;  in pipeline_event_rx_adapter_setup()
|   410   queue_conf.ev.queue_id = prod * stride;  in pipeline_event_rx_adapter_setup()
|   418   &queue_conf);  in pipeline_event_rx_adapter_setup()
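
This test enables Rx-adapter event vectorization through the same queue_conf: a flag in rx_queue_flags plus a vector size, timeout and mempool. A hedged sketch of just those fields; the sizes and pool are placeholders and should respect rte_event_eth_rx_adapter_vector_limits_get():

    #include <rte_event_eth_rx_adapter.h>
    #include <rte_mempool.h>

    /* Sketch: turn on event vectorization for an Rx adapter queue_conf. */
    static void
    enable_rx_vectors(struct rte_event_eth_rx_adapter_queue_conf *qconf,
                      struct rte_mempool *vector_pool)
    {
            qconf->rx_queue_flags |= RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
            qconf->vector_sz = 64;              /* events aggregated per vector */
            qconf->vector_timeout_ns = 100000;  /* flush partial vectors after 100 us */
            qconf->vector_mp = vector_pool;     /* pool of rte_event_vector objects */
    }
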
|
| /dpdk/examples/ip_pipeline/ |
| cryptodev.c |
|    53   struct rte_cryptodev_qp_conf queue_conf;  in cryptodev_create() local
|   101   queue_conf.nb_descriptors = params->queue_size;  in cryptodev_create()
|   104   &queue_conf, socket_id);  in cryptodev_create()
|
| /dpdk/drivers/net/softnic/ |
| rte_eth_softnic_cryptodev.c |
|    62   struct rte_cryptodev_qp_conf queue_conf;  in softnic_cryptodev_create() local
|   111   queue_conf.nb_descriptors = params->queue_size;  in softnic_cryptodev_create()
|   114   &queue_conf, socket_id);  in softnic_cryptodev_create()
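
Both cryptodev listings (ip_pipeline above and softnic here) fill a struct rte_cryptodev_qp_conf and pass it to rte_cryptodev_queue_pair_setup(). A hedged sketch; the device id, descriptor count and session mempool are placeholders, and the struct's fields vary slightly across DPDK releases (older ones also carry mp_session_private):

    #include <rte_cryptodev.h>
    #include <rte_mempool.h>

    /* Sketch: configure one crypto queue pair on a given NUMA socket. */
    static int
    setup_crypto_qp(uint8_t dev_id, uint16_t qp_id,
                    struct rte_mempool *session_pool, int socket_id)
    {
            struct rte_cryptodev_qp_conf qp_conf = {
                    .nb_descriptors = 2048,
                    .mp_session = session_pool,
            };

            return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf, socket_id);
    }
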
|
| /dpdk/drivers/baseband/null/ |
| bbdev_null.c |
|   109   const struct rte_bbdev_queue_conf *queue_conf)  in q_setup() argument
|   118   RTE_CACHE_LINE_SIZE, queue_conf->socket);  in q_setup()
|   124   q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,  in q_setup()
|   125   queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);  in q_setup()
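
q_setup() is the null bbdev driver's end of rte_bbdev_queue_configure(), which receives the application's struct rte_bbdev_queue_conf. A hedged sketch of the caller side; the device id, queue id and sizes are placeholders, and RTE_BBDEV_OP_NONE matches what the null driver advertises:

    #include <rte_bbdev.h>

    /* Sketch: configure one bbdev queue for "no-op" operations. */
    static int
    setup_bbdev_queue(uint16_t dev_id, uint16_t queue_id, int socket_id)
    {
            struct rte_bbdev_queue_conf qconf = {
                    .socket = socket_id,
                    .queue_size = 1024,
                    .priority = 0,
                    .op_type = RTE_BBDEV_OP_NONE,
            };

            return rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
    }
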
|
| /dpdk/examples/eventdev_pipeline/ |
| pipeline_worker_generic.c |
|   448   struct rte_event_eth_rx_adapter_queue_conf queue_conf;  in init_adapters() local
|   449   memset(&queue_conf, 0, sizeof(queue_conf));  in init_adapters()
|   450   queue_conf.ev.sched_type = cdata.queue_type;  in init_adapters()
|   451   queue_conf.ev.queue_id = cdata.qid[0];  in init_adapters()
|   455   -1, &queue_conf);  in init_adapters()
|
| /dpdk/drivers/raw/skeleton/ |
| skeleton_rawdev.c |
|   227   rte_rawdev_obj_t queue_conf,  in skeleton_rawdev_queue_def_conf() argument
|   235   if (!dev || !queue_conf ||  in skeleton_rawdev_queue_def_conf()
|   243   rte_memcpy(queue_conf, skelq,  in skeleton_rawdev_queue_def_conf()
|   261   rte_rawdev_obj_t queue_conf,  in skeleton_rawdev_queue_setup() argument
|   270   if (!dev || !queue_conf ||  in skeleton_rawdev_queue_setup()
|   279   rte_memcpy(q, queue_conf,  in skeleton_rawdev_queue_setup()
|
| /dpdk/lib/rawdev/ |
| rte_rawdev.h |
|   161   rte_rawdev_obj_t queue_conf,
|   187   rte_rawdev_obj_t queue_conf,
|
| rte_rawdev_pmd.h |
|   227   rte_rawdev_obj_t queue_conf,
|   247   rte_rawdev_obj_t queue_conf,
|
| rte_rawdev.c |
|   126   rte_rawdev_obj_t queue_conf,  in rte_rawdev_queue_conf_get() argument
|   135   return (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf,  in rte_rawdev_queue_conf_get()
|   142   rte_rawdev_obj_t queue_conf,  in rte_rawdev_queue_setup() argument
|   151   return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf,  in rte_rawdev_queue_setup()
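
For rawdev, queue_conf is an opaque rte_rawdev_obj_t (a void pointer) whose layout each driver defines; the trailing argument visible above is the configuration size that the newer rawdev API forwards to the driver (older releases omit it). A hedged sketch with a purely illustrative driver-defined struct:

    #include <rte_rawdev.h>

    /* Illustrative only: a rawdev driver defines its own queue configuration
     * layout; "my_rawq_conf" and its "depth" field are placeholders. */
    struct my_rawq_conf {
            uint32_t depth;
    };

    static int
    setup_raw_queue(uint16_t dev_id, uint16_t queue_id)
    {
            struct my_rawq_conf qconf;

            /* Read the driver defaults, adjust, then apply. */
            if (rte_rawdev_queue_conf_get(dev_id, queue_id, &qconf, sizeof(qconf)) < 0)
                    return -1;

            qconf.depth = 256;
            return rte_rawdev_queue_setup(dev_id, queue_id, &qconf, sizeof(qconf));
    }
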
|
| /dpdk/app/test/ |
| test_event_eth_rx_adapter.c |
|   959   struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};  in adapter_queue_conf() local
|   964   0, &queue_conf);  in adapter_queue_conf()
|   968   queue_conf.ev.queue_id = 0;  in adapter_queue_conf()
|   969   queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;  in adapter_queue_conf()
|   970   queue_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;  in adapter_queue_conf()
|   974   0, &queue_conf);  in adapter_queue_conf()
|   980   0, &queue_conf);  in adapter_queue_conf()
|   986   -1, &queue_conf);  in adapter_queue_conf()
|
| test_event_crypto_adapter.c |
|   624   struct rte_event_queue_conf queue_conf;  in configure_eventdev() local
|   662   queue_conf.nb_atomic_flows = info.max_event_queue_flows;  in configure_eventdev()
|   663   queue_conf.nb_atomic_order_sequences = 32;  in configure_eventdev()
|   664   queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;  in configure_eventdev()
|   665   queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;  in configure_eventdev()
|   666   queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;  in configure_eventdev()
|   669   ret = rte_event_queue_setup(evdev, qid, &queue_conf);  in configure_eventdev()
|
| /dpdk/examples/ipsec-secgw/ |
| event_helper.c |
|   788   struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};  in eh_rx_adapter_configure() local
|   855   queue_conf.ev.queue_id = conn->eventq_id;  in eh_rx_adapter_configure()
|   856   queue_conf.ev.sched_type = em_conf->ext_params.sched_type;  in eh_rx_adapter_configure()
|   857   queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;  in eh_rx_adapter_configure()
|   866   queue_conf.vector_sz = em_conf->ext_params.vector_size;  in eh_rx_adapter_configure()
|   867   queue_conf.vector_timeout_ns = em_conf->vector_tmo_ns;  in eh_rx_adapter_configure()
|   868   queue_conf.vector_mp = vector_pool;  in eh_rx_adapter_configure()
|   869   queue_conf.rx_queue_flags =  in eh_rx_adapter_configure()
|   876   &queue_conf);  in eh_rx_adapter_configure()
|