/f-stack/dpdk/examples/flow_filtering/

flow_blocks.c
    9     generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
    38    generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,  [in generate_ipv4_flow(), argument]
    47    struct rte_flow_action_queue queue = { .index = rx_q };  [in generate_ipv4_flow()]
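
The matches above are the heart of the flow_filtering sample: a QUEUE action built from rx_q steers matching ingress traffic to that RX queue. A minimal sketch of the same idea, assuming an already-configured and started port; the helper name steer_ipv4_to_queue is illustrative, not the sample's own:

    #include <rte_flow.h>

    /* Sketch: create a flow rule that sends ingress IPv4 traffic on
     * port_id to RX queue rx_q.  Pattern items with no spec match any
     * ETH/IPv4 packet; the sample adds src/dst IP masks on top of this. */
    static struct rte_flow *
    steer_ipv4_to_queue(uint16_t port_id, uint16_t rx_q,
                        struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = rx_q };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }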
|
/f-stack/dpdk/app/test/

virtual_pmd.c
    109   struct virtual_ethdev_queue *rx_q;  [in virtual_ethdev_rx_queue_setup_success(), local]
    111   rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,  [in virtual_ethdev_rx_queue_setup_success()]
    114   if (rx_q == NULL)  [in virtual_ethdev_rx_queue_setup_success()]
    117   rx_q->port_id = dev->data->port_id;  [in virtual_ethdev_rx_queue_setup_success()]
    118   rx_q->queue_id = rx_queue_id;  [in virtual_ethdev_rx_queue_setup_success()]
    120   dev->data->rx_queues[rx_queue_id] = rx_q;  [in virtual_ethdev_rx_queue_setup_success()]
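
virtual_ethdev_rx_queue_setup_success() is a PMD-side rx_queue_setup callback: it allocates a per-queue context and parks it in dev->data->rx_queues[]. From an application the same path is reached through the public rte_eth_rx_queue_setup() call; a hedged sketch, where nb_rxd and mb_pool are illustrative parameters supplied by the caller:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Sketch: set up one RX queue on port_id.  The driver's callback
     * (e.g. the virtual_pmd one matched above) runs underneath this call
     * and stores its queue object in dev->data->rx_queues[rx_queue_id]. */
    static int
    setup_one_rx_queue(uint16_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rxd, struct rte_mempool *mb_pool)
    {
        return rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rxd,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL /* default rte_eth_rxconf */,
                                      mb_pool);
    }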
|
/f-stack/dpdk/lib/librte_kni/

rte_kni.c
    71    struct rte_kni_fifo *rx_q; /**< RX queue */  [struct member]
    282   kni->rx_q = kni->m_rx_q->addr;  [in rte_kni_alloc()]
    283   kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);  [in rte_kni_alloc()]
    439   while (kni_fifo_count(kni->rx_q) && retry--)  [in rte_kni_release()]
    442   if (kni_fifo_count(kni->rx_q))  [in rte_kni_release()]
    607   num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);  [in rte_kni_tx_burst()]
    615   ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);  [in rte_kni_tx_burst()]
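
rte_kni_tx_burst() is where user space fills kni->rx_q: mbufs are translated to physical addresses and pushed onto the FIFO for the kernel side to drain. A short sketch of the public API, assuming a KNI device already created with rte_kni_alloc() and a started port; the burst size of 32 is arbitrary:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_kni.h>

    /* Sketch: forward one burst of packets from an ethdev RX queue into a
     * KNI device.  rte_kni_tx_burst() enqueues onto kni->rx_q; anything the
     * FIFO could not take must be freed by the caller. */
    static void
    port_to_kni(uint16_t port_id, struct rte_kni *kni)
    {
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, i;
        unsigned int nb_tx;

        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
        nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);

        for (i = nb_tx; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]);
    }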
|
/f-stack/dpdk/drivers/net/pfe/

pfe_hif_lib.c
    107   desc = client->rx_q[qno].base;  [in hif_lib_client_release_rx_buffers()]
    109   for (ii = 0; ii < client->rx_q[qno].size; ii++) {  [in hif_lib_client_release_rx_buffers()]
    150   queue = &client->rx_q[qno];  [in hif_lib_client_init_rx_buffers()]
    165   queue = &client->rx_q[qno];  [in hif_lib_client_init_rx_buffers()]
    345   struct hif_client_rx_queue *queue = &client->rx_q[qno];  [in hif_lib_event_handler_start()]

pfe_hif.h
    45    struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];  [struct member]

pfe_hif_lib.h
    68    struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];  [struct member]

pfe_ethdev.c
    296   ret = hif_lib_receive_pkt(&client->rx_q[0],  [in pfe_eth_open()]
    302   ret = hif_lib_receive_pkt(&client->rx_q[0],  [in pfe_eth_open()]
    491   dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];  [in pfe_rx_queue_setup()]
    492   priv->client.rx_q[queue_idx].queue_id = queue_idx;  [in pfe_rx_queue_setup()]

pfe_hif.c
    235   rx_queue = &client->rx_q[i];  [in pfe_hif_client_register()]
    415   client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],  [in pfe_hif_rx_process()]
|
/f-stack/dpdk/examples/multi_process/client_server_mp/mp_server/

init.h
    18    struct rte_ring *rx_q;  [struct member]

init.c
    169   clients[i].rx_q = rte_ring_create(q_name,  [in init_shm_rings()]
    172   if (clients[i].rx_q == NULL)  [in init_shm_rings()]

main.c
    205   if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,  [in flush_rx_queue()]
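
flush_rx_queue() leans on the all-or-nothing semantics of rte_ring_enqueue_bulk(): either the whole burst lands on the client's rx_q ring or none of it does, in which case the server drops the packets. A hedged sketch of the same per-client ring pattern; the ring name, size, and helper names are illustrative:

    #include <rte_ring.h>
    #include <rte_mbuf.h>

    static struct rte_ring *client_ring;

    /* Sketch: one single-producer/single-consumer ring per client,
     * as in init_shm_rings() above.  The count (512) must be a power of two. */
    static int
    create_client_ring(unsigned int socket_id)
    {
        client_ring = rte_ring_create("client_ring_0", 512, socket_id,
                                      RING_F_SP_ENQ | RING_F_SC_DEQ);
        return client_ring == NULL ? -1 : 0;
    }

    /* Sketch: bulk enqueue returns either count or 0; on 0 the burst is
     * dropped so a slow client cannot stall the server. */
    static void
    flush_to_client(struct rte_mbuf **bufs, unsigned int count)
    {
        unsigned int i;

        if (rte_ring_enqueue_bulk(client_ring, (void **)bufs, count, NULL) == 0)
            for (i = 0; i < count; i++)
                rte_pktmbuf_free(bufs[i]);
    }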
|
/f-stack/dpdk/examples/server_node_efd/server/

init.h
    18    struct rte_ring *rx_q;  [struct member]

init.c
    189   nodes[i].rx_q = rte_ring_create(q_name,  [in init_shm_rings()]
    192   if (nodes[i].rx_q == NULL)  [in init_shm_rings()]

main.c
    225   if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,  [in flush_rx_queue()]
|
/f-stack/dpdk/kernel/linux/kni/

kni_net.c
    245   kni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);  [in kni_net_release_fifo_phy()]
    371   num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);  [in kni_net_rx_normal()]
    444   num_rq = kni_fifo_count(kni->rx_q);  [in kni_net_rx_lo_fifo()]
    466   ret = kni_fifo_get(kni->rx_q, kni->pa, num);  [in kni_net_rx_lo_fifo()]
    536   num_rq = kni_fifo_count(kni->rx_q);  [in kni_net_rx_lo_fifo_skb()]
    550   ret = kni_fifo_get(kni->rx_q, kni->pa, num);  [in kni_net_rx_lo_fifo_skb()]

kni_dev.h
    61    struct rte_kni_fifo *rx_q;  [struct member]

kni_misc.c
    354   kni->rx_q = iova_to_kva(current, dev_info.rx_phys);  [in kni_ioctl_create()]
    371   kni->rx_q = phys_to_virt(dev_info.rx_phys);  [in kni_ioctl_create()]
    387   (unsigned long long) dev_info.rx_phys, kni->rx_q);  [in kni_ioctl_create()]
|
/f-stack/dpdk/drivers/net/avp/

avp_ethdev.c
    906   avp->rx_q[i] = avp_dev_translate_address(eth_dev,  [in avp_dev_create()]
    1388  struct rte_avp_fifo *rx_q;  [in avp_recv_scattered_pkts(), local]
    1405  rx_q = avp->rx_q[rxq->queue_id];  [in avp_recv_scattered_pkts()]
    1416  avail = avp_fifo_count(rx_q);  [in avp_recv_scattered_pkts()]
    1429  n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);  [in avp_recv_scattered_pkts()]
    1431  count, rx_q);  [in avp_recv_scattered_pkts()]
    1490  struct rte_avp_fifo *rx_q;  [in avp_recv_pkts(), local]
    1502  rx_q = avp->rx_q[rxq->queue_id];  [in avp_recv_pkts()]
    1513  avail = avp_fifo_count(rx_q);  [in avp_recv_pkts()]
    1526  n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);  [in avp_recv_pkts()]
    [all …]
|
/f-stack/dpdk/drivers/common/qat/

qat_qp.h
    65    struct qat_queue rx_q;  [struct member]

qat_qp.c
    254   if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,  [in qat_qp_setup()]
    328   qat_queue_delete(&(qp->rx_q));  [in qat_qp_release()]
    529   queue = &qp->rx_q;  [in adf_configure_queues()]
    855   rx_queue = &(tmp_qp->rx_q);  [in qat_dequeue_op_burst()]
    922   struct qat_queue *queue = &(qp->rx_q);  [in qat_cq_dequeue_response()]
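
qat_dequeue_op_burst() drains qp->rx_q, the response ring the QAT hardware writes completions into. Applications never touch the ring directly; they reach it through the generic cryptodev dequeue call, roughly as below (the wrapper name drain_completions is illustrative):

    #include <rte_cryptodev.h>

    /* Sketch: pull up to nb_ops completed crypto operations off the queue
     * pair's response ring (the driver's rx_q) for dev_id/qp_id. */
    static uint16_t
    drain_completions(uint8_t dev_id, uint16_t qp_id,
                      struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        return rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
    }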
|
/f-stack/dpdk/doc/guides/sample_app_ug/

flow_filtering.rst
    366   generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
    375   struct rte_flow_action_queue queue = { .index = rx_q };
    438   struct rte_flow_action_queue queue = { .index = rx_q };

server_node_efd.rst
    244   if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
|
/f-stack/dpdk/drivers/crypto/qat/

qat_sym_hw_dp.c
    717   struct qat_queue *rx_queue = &qp->rx_q;  [in qat_sym_dp_dequeue_burst()]
    800   struct qat_queue *rx_queue = &qp->rx_q;  [in qat_sym_dp_dequeue()]
    848   struct qat_queue *rx_queue = &qp->rx_q;  [in qat_sym_dp_update_head()]
    909   dp_ctx->head = qp->rx_q.head;  [in qat_sym_configure_dp_ctx()]
|
/f-stack/dpdk/doc/guides/prog_guide/

kernel_nic_interface.rst
    280   This thread will enqueue the mbuf in the rx_q FIFO,
    282   The KNI thread will poll all KNI active devices for the rx_q.
|
/f-stack/dpdk/doc/guides/rel_notes/

release_2_1.rst
    356   entries are in ``kni->rx_q`` prior to actually pulling them from the fifo.
    838   Loop processing packets dequeued from rx_q was using the number of packets
|