Lines Matching refs:queue
23 static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
24 static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
28 static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
29 static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
70 struct ark_rx_queue *queue, in eth_ark_rx_hw_setup() argument
77 queue_base = rte_malloc_virt2iova(queue); in eth_ark_rx_hw_setup()
81 phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q); in eth_ark_rx_hw_setup()
84 if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) { in eth_ark_rx_hw_setup()
90 ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0); in eth_ark_rx_hw_setup()
92 ark_udm_write_addr(queue->udm, phys_addr_prod_index); in eth_ark_rx_hw_setup()
95 ark_mpu_reset_stats(queue->mpu); in eth_ark_rx_hw_setup()
98 ark_mpu_set_producer(queue->mpu, queue->seed_index); in eth_ark_rx_hw_setup()
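The eth_ark_rx_hw_setup() lines above follow a translate-then-program pattern: rte_malloc_virt2iova() converts host pointers into bus addresses before the MPU ring base and the UDM write-back address are programmed. A minimal standalone model of that step; every name here (virt2iova, fake_mpu) is a hypothetical stand-in, not the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t iova_t;

    /* Stand-in for rte_malloc_virt2iova(); identity-mapped for the demo. */
    static iova_t virt2iova(const void *va)
    {
        return (iova_t)(uintptr_t)va;
    }

    struct fake_mpu {              /* models the MPU register block */
        iova_t ring_base;          /* bus address of the address ring */
        uint32_t ring_size;
    };

    int main(void)
    {
        static iova_t paddress_q[1024]; /* ring of buffer bus addresses */
        struct fake_mpu mpu;

        /* Hardware is given bus addresses, never host pointers. */
        mpu.ring_base = virt2iova(paddress_q);
        mpu.ring_size = 1024;
        printf("MPU ring base 0x%llx, %u entries\n",
               (unsigned long long)mpu.ring_base, mpu.ring_size);
        return 0;
    }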
105 eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index) in eth_ark_rx_update_cons_index() argument
107 queue->cons_index = cons_index; in eth_ark_rx_update_cons_index()
108 eth_ark_rx_seed_mbufs(queue); in eth_ark_rx_update_cons_index()
109 if (((cons_index - queue->last_cons) >= 64U)) { in eth_ark_rx_update_cons_index()
110 queue->last_cons = cons_index; in eth_ark_rx_update_cons_index()
111 ark_mpu_set_producer(queue->mpu, queue->seed_index); in eth_ark_rx_update_cons_index()
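eth_ark_rx_update_cons_index() reseeds mbufs on every call but batches the MPU producer write, issuing it only once 64 entries have been consumed; the unsigned subtraction keeps the distance test correct across 32-bit wraparound. A compilable sketch of that batched-doorbell idiom, with a counter standing in for the MMIO write:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t last_cons;
    static unsigned doorbells;

    static void update_cons(uint32_t cons_index)
    {
        if ((cons_index - last_cons) >= 64U) {  /* mod-2^32 distance */
            last_cons = cons_index;
            doorbells++;                        /* stand-in for the MMIO write */
        }
    }

    int main(void)
    {
        for (uint32_t c = 0; c < 1000; c++)
            update_cons(c);
        printf("%u doorbells for 1000 updates\n", doorbells); /* 15 */
        return 0;
    }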
127 struct ark_rx_queue *queue; in eth_ark_dev_rx_queue_setup() local
160 queue = rte_zmalloc_socket("Ark_rxqueue", in eth_ark_dev_rx_queue_setup()
164 if (queue == 0) { in eth_ark_dev_rx_queue_setup()
170 queue->mb_pool = mb_pool; in eth_ark_dev_rx_queue_setup()
171 queue->phys_qid = qidx; in eth_ark_dev_rx_queue_setup()
172 queue->queue_index = queue_idx; in eth_ark_dev_rx_queue_setup()
173 queue->queue_size = nb_desc; in eth_ark_dev_rx_queue_setup()
174 queue->queue_mask = nb_desc - 1; in eth_ark_dev_rx_queue_setup()
176 queue->reserve_q = in eth_ark_dev_rx_queue_setup()
181 queue->paddress_q = in eth_ark_dev_rx_queue_setup()
187 if (queue->reserve_q == 0 || queue->paddress_q == 0) { in eth_ark_dev_rx_queue_setup()
191 rte_free(queue->reserve_q); in eth_ark_dev_rx_queue_setup()
192 rte_free(queue->paddress_q); in eth_ark_dev_rx_queue_setup()
193 rte_free(queue); in eth_ark_dev_rx_queue_setup()
197 dev->data->rx_queues[queue_idx] = queue; in eth_ark_dev_rx_queue_setup()
198 queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET); in eth_ark_dev_rx_queue_setup()
199 queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET); in eth_ark_dev_rx_queue_setup()
202 status = eth_ark_rx_seed_mbufs(queue); in eth_ark_dev_rx_queue_setup()
204 if (queue->seed_index != nb_desc) { in eth_ark_dev_rx_queue_setup()
211 status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx); in eth_ark_dev_rx_queue_setup()
220 for (i = 0, mbuf = queue->reserve_q; in eth_ark_dev_rx_queue_setup()
221 i < queue->seed_index; ++i, mbuf++) { in eth_ark_dev_rx_queue_setup()
224 rte_free(queue->reserve_q); in eth_ark_dev_rx_queue_setup()
225 rte_free(queue->paddress_q); in eth_ark_dev_rx_queue_setup()
226 rte_free(queue); in eth_ark_dev_rx_queue_setup()
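The setup path allocates the queue struct and two parallel arrays (reserve_q for mbuf pointers, paddress_q for their bus addresses), and on failure frees exactly what it already allocated. A sketch of the same shape, assuming nb_desc is a power of two so queue_mask = nb_desc - 1 can replace a modulo; calloc() stands in for rte_zmalloc_socket():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_rxq {
        void **reserve_q;       /* per-slot buffer pointers */
        uint64_t *paddress_q;   /* per-slot bus addresses */
        uint32_t queue_size;
        uint32_t queue_mask;    /* queue_size - 1, needs power of two */
    };

    static struct demo_rxq *demo_rxq_create(uint32_t nb_desc)
    {
        struct demo_rxq *q;

        if (nb_desc == 0 || (nb_desc & (nb_desc - 1)) != 0)
            return NULL;        /* reject non-powers of two */

        q = calloc(1, sizeof(*q));
        if (q == NULL)
            return NULL;
        q->queue_size = nb_desc;
        q->queue_mask = nb_desc - 1;
        q->reserve_q = calloc(nb_desc, sizeof(*q->reserve_q));
        q->paddress_q = calloc(nb_desc, sizeof(*q->paddress_q));
        if (q->reserve_q == NULL || q->paddress_q == NULL) {
            /* unwind in the same order as the driver's error path */
            free(q->reserve_q);
            free(q->paddress_q);
            free(q);
            return NULL;
        }
        return q;
    }

    int main(void)
    {
        struct demo_rxq *q = demo_rxq_create(512);
        printf("created: %s, mask 0x%x\n",
               q ? "yes" : "no", q ? q->queue_mask : 0);
        if (q) {
            free(q->reserve_q);
            free(q->paddress_q);
            free(q);
        }
        return 0;
    }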
248 struct ark_rx_queue *queue; in eth_ark_recv_pkts() local
254 queue = (struct ark_rx_queue *)rx_queue; in eth_ark_recv_pkts()
255 if (unlikely(queue == 0)) in eth_ark_recv_pkts()
259 prod_index = queue->prod_index; in eth_ark_recv_pkts()
260 cons_index = queue->cons_index; in eth_ark_recv_pkts()
264 mbuf = queue->reserve_q[cons_index & queue->queue_mask]; in eth_ark_recv_pkts()
291 queue->phys_qid, in eth_ark_recv_pkts()
293 queue->prod_index, in eth_ark_recv_pkts()
294 queue->seed_index); in eth_ark_recv_pkts()
300 queue->udm->rt_cfg.prod_idx, in eth_ark_recv_pkts()
302 ark_mpu_dump(queue->mpu, in eth_ark_recv_pkts()
304 queue->phys_qid); in eth_ark_recv_pkts()
314 (queue, meta, mbuf, cons_index + 1); in eth_ark_recv_pkts()
326 eth_ark_rx_update_cons_index(queue, cons_index); in eth_ark_recv_pkts()
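eth_ark_recv_pkts() keeps prod_index and cons_index as free-running 32-bit counters and applies queue_mask only when indexing reserve_q; the counters themselves are never reset to the ring bounds. A toy consume loop showing the idiom:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8u                /* power of two */
    #define RING_MASK (RING_SIZE - 1u)

    int main(void)
    {
        int ring[RING_SIZE];
        uint32_t prod = 0, cons = 0;

        /* Producer: free-running index, masked only on access. */
        for (int i = 0; i < 5; i++)
            ring[prod++ & RING_MASK] = 100 + i;

        /* Consumer: runs until the counters meet, like the
         * cons_index/prod_index walk in eth_ark_recv_pkts(). */
        while (cons != prod) {
            uint32_t slot = cons & RING_MASK;
            printf("slot %u -> %d\n", slot, ring[slot]);
            cons++;
        }
        return 0;
    }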
333 eth_ark_rx_jumbo(struct ark_rx_queue *queue, in eth_ark_rx_jumbo() argument
361 mbuf = queue->reserve_q[cons_index & queue->queue_mask]; in eth_ark_rx_jumbo()
376 eth_ark_rx_queue_drain(struct ark_rx_queue *queue) in eth_ark_rx_queue_drain() argument
381 cons_index = queue->cons_index; in eth_ark_rx_queue_drain()
384 while ((cons_index ^ queue->prod_index) & queue->queue_mask) { in eth_ark_rx_queue_drain()
385 mbuf = queue->reserve_q[cons_index & queue->queue_mask]; in eth_ark_rx_queue_drain()
388 eth_ark_rx_update_cons_index(queue, cons_index); in eth_ark_rx_queue_drain()
395 struct ark_rx_queue *queue; in eth_ark_dev_rx_queue_count() local
397 queue = dev->data->rx_queues[queue_id]; in eth_ark_dev_rx_queue_count()
398 return (queue->prod_index - queue->cons_index); /* mod arith */ in eth_ark_dev_rx_queue_count()
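The /* mod arith */ comment is the whole trick: with uint32_t indices, prod_index - cons_index gives the queue occupancy even after either counter wraps past 2^32. A worked check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* prod has wrapped past zero, cons has not: the unsigned
         * subtraction still reports 6 entries in flight. */
        uint32_t cons = 0xFFFFFFFDu;
        uint32_t prod = 3;              /* 0xFFFFFFFD + 6, wrapped */

        printf("count = %u\n", prod - cons);    /* prints 6 */
        return 0;
    }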
405 struct ark_rx_queue *queue; in eth_ark_rx_start_queue() local
407 queue = dev->data->rx_queues[queue_id]; in eth_ark_rx_start_queue()
408 if (queue == 0) in eth_ark_rx_start_queue()
413 ark_mpu_set_producer(queue->mpu, queue->seed_index); in eth_ark_rx_start_queue()
414 ark_mpu_start(queue->mpu); in eth_ark_rx_start_queue()
416 ark_udm_queue_enable(queue->udm, 1); in eth_ark_rx_start_queue()
428 struct ark_rx_queue *queue; in eth_ark_rx_stop_queue() local
430 queue = dev->data->rx_queues[queue_id]; in eth_ark_rx_stop_queue()
431 if (queue == 0) in eth_ark_rx_stop_queue()
434 ark_udm_queue_enable(queue->udm, 0); in eth_ark_rx_stop_queue()
443 eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue) in eth_ark_rx_seed_mbufs() argument
445 uint32_t limit = queue->cons_index + queue->queue_size; in eth_ark_rx_seed_mbufs()
446 uint32_t seed_index = queue->seed_index; in eth_ark_rx_seed_mbufs()
449 uint32_t seed_m = queue->seed_index & queue->queue_mask; in eth_ark_rx_seed_mbufs()
454 if (unlikely(seed_m + nb > queue->queue_size)) in eth_ark_rx_seed_mbufs()
455 nb = queue->queue_size - seed_m; in eth_ark_rx_seed_mbufs()
457 struct rte_mbuf **mbufs = &queue->reserve_q[seed_m]; in eth_ark_rx_seed_mbufs()
458 int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb); in eth_ark_rx_seed_mbufs()
462 status = eth_ark_rx_seed_recovery(queue, &nb, mbufs); in eth_ark_rx_seed_mbufs()
471 queue->reserve_q[seed_m + count]; in eth_ark_rx_seed_mbufs()
477 queue->phys_qid; in eth_ark_rx_seed_mbufs()
482 queue->seed_index += nb; in eth_ark_rx_seed_mbufs()
488 queue->paddress_q[seed_m++] = in eth_ark_rx_seed_mbufs()
493 queue->paddress_q[seed_m++] = in eth_ark_rx_seed_mbufs()
498 queue->paddress_q[seed_m++] = in eth_ark_rx_seed_mbufs()
503 queue->paddress_q[seed_m++] = in eth_ark_rx_seed_mbufs()
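eth_ark_rx_seed_mbufs() clamps each bulk request so it never crosses the ring's wrap point (if seed_m + nb > queue_size, nb becomes queue_size - seed_m), letting rte_pktmbuf_alloc_bulk() fill one contiguous slice of reserve_q. A standalone model of that clamp, with a trivial filler standing in for the mempool:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QSIZE 16u
    #define QMASK (QSIZE - 1u)

    /* Fill nb contiguous slots starting at slot; stand-in for
     * rte_pktmbuf_alloc_bulk() into &reserve_q[seed_m]. */
    static void fill_bulk(int *ring, uint32_t slot, uint32_t nb)
    {
        for (uint32_t i = 0; i < nb; i++)
            ring[slot + i] = 1;
    }

    int main(void)
    {
        int ring[QSIZE];
        uint32_t seed_index = 13;       /* free-running seed counter */
        uint32_t nb = 8;                /* want 8 fresh buffers */
        uint32_t seed_m = seed_index & QMASK;

        memset(ring, 0, sizeof(ring));

        /* Clamp so the bulk never crosses the wrap point; the rest
         * is picked up by the next call, after wrapping to slot 0. */
        if (seed_m + nb > QSIZE)
            nb = QSIZE - seed_m;        /* 16 - 13 = 3 here */

        fill_bulk(ring, seed_m, nb);
        seed_index += nb;
        printf("seeded %u slots, next seed_m = %u\n",
               nb, seed_index & QMASK);
        return 0;
    }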
515 eth_ark_rx_seed_recovery(struct ark_rx_queue *queue, in eth_ark_rx_seed_recovery() argument
526 status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb); in eth_ark_rx_seed_recovery()
531 *pnb, queue->queue_index, in eth_ark_rx_seed_recovery()
532 queue->seed_index - queue->cons_index); in eth_ark_rx_seed_recovery()
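When the full bulk allocation fails, eth_ark_rx_seed_recovery() retries with a reduced request instead of stalling the queue. The exact fallback size is not visible in these lines; the sketch below halves the request purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy allocator that fails for requests above its budget;
     * stands in for rte_pktmbuf_alloc_bulk() returning nonzero. */
    static int toy_alloc_bulk(uint32_t nb, uint32_t budget)
    {
        return nb <= budget ? 0 : -1;
    }

    int main(void)
    {
        uint32_t nb = 256, budget = 50;

        /* Degrade the request until it fits or becomes pointless. */
        while (toy_alloc_bulk(nb, budget) != 0 && nb > 1)
            nb /= 2;                    /* illustrative halving */
        printf("recovered with nb = %u\n", nb);     /* 32 here */
        return 0;
    }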
541 struct ark_rx_queue *queue; in eth_ark_rx_dump_queue() local
543 queue = dev->data->rx_queues[queue_id]; in eth_ark_rx_dump_queue()
545 ark_ethdev_rx_dump(msg, queue); in eth_ark_rx_dump_queue()
553 struct ark_rx_queue *queue; in eth_ark_dev_rx_queue_release() local
556 queue = (struct ark_rx_queue *)vqueue; in eth_ark_dev_rx_queue_release()
557 if (queue == 0) in eth_ark_dev_rx_queue_release()
560 ark_udm_queue_enable(queue->udm, 0); in eth_ark_dev_rx_queue_release()
562 ark_mpu_stop(queue->mpu); in eth_ark_dev_rx_queue_release()
565 eth_ark_rx_queue_drain(queue); in eth_ark_dev_rx_queue_release()
567 for (i = 0; i < queue->queue_size; ++i) in eth_ark_dev_rx_queue_release()
568 rte_pktmbuf_free(queue->reserve_q[i]); in eth_ark_dev_rx_queue_release()
570 rte_free(queue->reserve_q); in eth_ark_dev_rx_queue_release()
571 rte_free(queue->paddress_q); in eth_ark_dev_rx_queue_release()
572 rte_free(queue); in eth_ark_dev_rx_queue_release()
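The release path encodes a strict teardown order: quiesce the hardware (UDM disable, MPU stop), drain in-flight entries, free every seeded mbuf, then free the arrays and finally the queue itself. A sketch of that shutdown-before-free discipline with stand-in calls (error checks elided for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_q {
        void **reserve_q;
        unsigned queue_size;
        int hw_running;
    };

    static void hw_stop(struct demo_q *q) { q->hw_running = 0; }
    static void drain(struct demo_q *q)   { (void)q; /* retire leftovers */ }

    static void demo_q_release(struct demo_q *q)
    {
        if (q == NULL)
            return;
        hw_stop(q);                     /* 1: hardware must stop first */
        drain(q);                       /* 2: retire in-flight entries */
        for (unsigned i = 0; i < q->queue_size; i++)
            free(q->reserve_q[i]);      /* 3: free seeded buffers */
        free(q->reserve_q);             /* 4: then the array... */
        free(q);                        /* 5: ...then the queue */
    }

    int main(void)
    {
        struct demo_q *q = calloc(1, sizeof(*q));
        q->queue_size = 4;
        q->reserve_q = calloc(4, sizeof(void *));
        for (unsigned i = 0; i < 4; i++)
            q->reserve_q[i] = malloc(64);
        q->hw_running = 1;
        demo_q_release(q);
        puts("released cleanly");
        return 0;
    }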
578 struct ark_rx_queue *queue; in eth_rx_queue_stats_get() local
581 queue = vqueue; in eth_rx_queue_stats_get()
582 if (queue == 0) in eth_rx_queue_stats_get()
584 udm = queue->udm; in eth_rx_queue_stats_get()
588 uint64_t idropped = ark_udm_dropped(queue->udm); in eth_rx_queue_stats_get()
590 stats->q_ipackets[queue->queue_index] = ipackets; in eth_rx_queue_stats_get()
591 stats->q_ibytes[queue->queue_index] = ibytes; in eth_rx_queue_stats_get()
592 stats->q_errors[queue->queue_index] = idropped; in eth_rx_queue_stats_get()
601 struct ark_rx_queue *queue; in eth_rx_queue_stats_reset() local
603 queue = vqueue; in eth_rx_queue_stats_reset()
604 if (queue == 0) in eth_rx_queue_stats_reset()
607 ark_mpu_reset_stats(queue->mpu); in eth_rx_queue_stats_reset()
608 ark_udm_queue_stats_reset(queue->udm); in eth_rx_queue_stats_reset()
615 struct ark_rx_queue *queue; in eth_ark_udm_force_close() local
623 queue = (struct ark_rx_queue *)dev->data->rx_queues[i]; in eth_ark_udm_force_close()
624 if (queue == 0) in eth_ark_udm_force_close()
627 ark_mpu_start(queue->mpu); in eth_ark_udm_force_close()
629 index = 100000 + queue->seed_index; in eth_ark_udm_force_close()
630 ark_mpu_set_producer(queue->mpu, index); in eth_ark_udm_force_close()
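eth_ark_udm_force_close() restarts the MPU and pushes the producer 100000 entries past seed_index so the UDM can flush whatever it still holds; mod-2^32 arithmetic makes the overshoot harmless even if the addition wraps. A tiny check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Overshooting the producer is safe under mod-2^32 index
         * arithmetic: the distance still reads 100000 credits,
         * even though the addition wraps past zero. */
        uint32_t seed_index = 0xFFFFF000u;      /* near the wrap */
        uint32_t index = 100000u + seed_index;  /* wraps */

        printf("credits = %u\n", index - seed_index);   /* 100000 */
        return 0;
    }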
642 ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue) in ark_ethdev_rx_dump() argument
644 if (queue == NULL) in ark_ethdev_rx_dump()
646 ARK_PMD_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name); in ark_ethdev_rx_dump()
648 "queue_size", queue->queue_size, in ark_ethdev_rx_dump()
649 "seed_index", queue->seed_index, in ark_ethdev_rx_dump()
650 "prod_index", queue->prod_index, in ark_ethdev_rx_dump()
651 "cons_index", queue->cons_index); in ark_ethdev_rx_dump()
653 ark_mpu_dump(queue->mpu, name, queue->phys_qid); in ark_ethdev_rx_dump()
654 ark_mpu_dump_setup(queue->mpu, queue->phys_qid); in ark_ethdev_rx_dump()
655 ark_udm_dump(queue->udm, name); in ark_ethdev_rx_dump()
656 ark_udm_dump_setup(queue->udm, queue->phys_qid); in ark_ethdev_rx_dump()