Lines Matching refs:queue
56 static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
58 static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
59 static void free_completed_tx(struct ark_tx_queue *queue);
62 ark_tx_hw_queue_stop(struct ark_tx_queue *queue) in ark_tx_hw_queue_stop() argument
64 ark_mpu_stop(queue->mpu); in ark_tx_hw_queue_stop()
92 struct ark_tx_queue *queue; in eth_ark_xmit_pkts() local
102 queue = (struct ark_tx_queue *)vtxq; in eth_ark_xmit_pkts()
105 free_completed_tx(queue); in eth_ark_xmit_pkts()
107 prod_index_limit = queue->queue_size + queue->free_index; in eth_ark_xmit_pkts()
110 (nb < nb_pkts) && (queue->prod_index != prod_index_limit); in eth_ark_xmit_pkts()
129 queue->tx_errors += 1; in eth_ark_xmit_pkts()
137 stat = eth_ark_tx_jumbo(queue, mbuf); in eth_ark_xmit_pkts()
141 idx = queue->prod_index & queue->queue_mask; in eth_ark_xmit_pkts()
142 queue->bufs[idx] = mbuf; in eth_ark_xmit_pkts()
143 meta = &queue->meta_q[idx]; in eth_ark_xmit_pkts()
148 queue->prod_index++; in eth_ark_xmit_pkts()
160 queue->prod_index, in eth_ark_xmit_pkts()
161 queue->cons_index, in eth_ark_xmit_pkts()
162 queue->free_index); in eth_ark_xmit_pkts()
163 ark_mpu_dump(queue->mpu, in eth_ark_xmit_pkts()
165 queue->phys_qid); in eth_ark_xmit_pkts()
170 ark_mpu_set_producer(queue->mpu, queue->prod_index); in eth_ark_xmit_pkts()
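The burst path above runs on three free-running uint32_t indices: prod_index is where software fills the next descriptor, cons_index is the hardware completion pointer, and free_index trails behind reclaiming mbufs. A slot is addressed as index & queue_mask (line 141), which requires queue_size to be a power of two, and the limit computed at line 107 caps prod_index at queue_size slots ahead of free_index. A minimal sketch of that discipline, with illustrative names (tx_ring, ring_enqueue are not the driver's):

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Free-running-index ring: indices only ever increment; unsigned
     * wraparound keeps the arithmetic below exact. size must be a
     * power of two. */
    struct tx_ring {
        uint32_t size;                /* power of two */
        uint32_t mask;                /* size - 1 */
        uint32_t prod;                /* software fill pointer */
        uint32_t free;                /* reclaim pointer */
        struct rte_mbuf *slots[];     /* size entries */
    };

    static int ring_enqueue(struct tx_ring *r, struct rte_mbuf *m)
    {
        /* Occupancy is prod - free, valid across uint32_t wrap. */
        if (r->prod - r->free >= r->size)
            return -1;                      /* ring full */
        r->slots[r->prod & r->mask] = m;    /* wrap via mask */
        r->prod++;
        return 0;
    }

As in the driver, the producer index is pushed to hardware once per burst (line 170), amortizing the MMIO write across every packet queued in the loop.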
177 eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf) in eth_ark_tx_jumbo() argument
185 free_queue_space = queue->queue_mask - in eth_ark_tx_jumbo()
186 (queue->prod_index - queue->free_index); in eth_ark_tx_jumbo()
193 idx = queue->prod_index & queue->queue_mask; in eth_ark_tx_jumbo()
194 queue->bufs[idx] = mbuf; in eth_ark_tx_jumbo()
195 meta = &queue->meta_q[idx]; in eth_ark_tx_jumbo()
199 queue->prod_index++; in eth_ark_tx_jumbo()
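For chained mbufs, eth_ark_tx_jumbo first verifies the whole chain fits: free_queue_space at lines 185-186 is queue_mask - (prod_index - free_index), i.e. the empty-slot count with one slot held back as a full-versus-empty guard, and the subtraction stays correct across uint32_t wraparound because both indices are free-running. Each segment then takes its own descriptor with the same mask-and-increment pattern. A sketch over the tx_ring above (ring_enqueue_chain is an illustrative name; nb_segs and next are real rte_mbuf fields):

    /* Refuse the chain unless every segment fits: all or nothing. */
    static int ring_enqueue_chain(struct tx_ring *r, struct rte_mbuf *m)
    {
        /* mask - (prod - free): free slots minus one guard slot */
        uint32_t space = r->mask - (r->prod - r->free);

        if (m->nb_segs > space)
            return -1;
        for (; m != NULL; m = m->next) {
            r->slots[r->prod & r->mask] = m;  /* one slot per segment */
            r->prod++;
        }
        return 0;
    }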
217 struct ark_tx_queue *queue; in eth_ark_tx_queue_setup() local
231 queue = rte_zmalloc_socket("Ark_txqueue", in eth_ark_tx_queue_setup()
235 if (queue == 0) { in eth_ark_tx_queue_setup()
243 queue->queue_size = nb_desc; in eth_ark_tx_queue_setup()
244 queue->queue_mask = nb_desc - 1; in eth_ark_tx_queue_setup()
245 queue->phys_qid = qidx; in eth_ark_tx_queue_setup()
246 queue->queue_index = queue_idx; in eth_ark_tx_queue_setup()
247 dev->data->tx_queues[queue_idx] = queue; in eth_ark_tx_queue_setup()
249 queue->meta_q = in eth_ark_tx_queue_setup()
254 queue->bufs = in eth_ark_tx_queue_setup()
260 if (queue->meta_q == 0 || queue->bufs == 0) { in eth_ark_tx_queue_setup()
263 rte_free(queue->meta_q); in eth_ark_tx_queue_setup()
264 rte_free(queue->bufs); in eth_ark_tx_queue_setup()
265 rte_free(queue); in eth_ark_tx_queue_setup()
269 queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET); in eth_ark_tx_queue_setup()
270 queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET); in eth_ark_tx_queue_setup()
272 status = eth_ark_tx_hw_queue_config(queue); in eth_ark_tx_queue_setup()
275 rte_free(queue->meta_q); in eth_ark_tx_queue_setup()
276 rte_free(queue->bufs); in eth_ark_tx_queue_setup()
277 rte_free(queue); in eth_ark_tx_queue_setup()
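Setup allocates the queue structure and its two parallel arrays (meta_q for hardware descriptors, bufs for the matching mbuf pointers) on the queue's NUMA socket, then hands the queue to eth_ark_tx_hw_queue_config. The mask assignment at line 244 is only a valid wrap mask for power-of-two nb_desc, and both error paths free all three allocations unconditionally, which is safe because rte_free(NULL) is a documented no-op. A hedged fragment of the pattern (socket_id would come from the ethdev setup parameters; the guard is one a caller could add, using the real rte_is_power_of_2() helper from rte_common.h):

    /* queue_mask = nb_desc - 1 only works when nb_desc is a
     * power of two. */
    if (!rte_is_power_of_2(nb_desc))
        return -EINVAL;

    queue = rte_zmalloc_socket("Ark_txqueue", sizeof(*queue),
                               RTE_CACHE_LINE_SIZE, socket_id);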
286 eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue) in eth_ark_tx_hw_queue_config() argument
292 if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta))) in eth_ark_tx_hw_queue_config()
295 queue_base = rte_malloc_virt2iova(queue); in eth_ark_tx_hw_queue_config()
296 ring_base = rte_malloc_virt2iova(queue->meta_q); in eth_ark_tx_hw_queue_config()
300 ark_mpu_stop(queue->mpu); in eth_ark_tx_hw_queue_config()
301 ark_mpu_reset(queue->mpu); in eth_ark_tx_hw_queue_config()
304 ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1); in eth_ark_tx_hw_queue_config()
311 switch (queue->queue_size) { in eth_ark_tx_hw_queue_config()
327 ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns); in eth_ark_tx_hw_queue_config()
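Hardware configuration converts CPU virtual addresses to IOVAs with rte_malloc_virt2iova (lines 295-296) because the MPU and DDM engines reach memory by DMA and cannot follow process virtual addresses; this is also why the ring had to come from the rte_malloc family of allocators. The MPU is stopped and reset before being pointed at the ring, and the DDM is given the address where it should write back the consumer index, letting free_completed_tx poll completions from plain memory instead of a device register. A fragment sketching the translation step (the error handling is illustrative, not necessarily the driver's):

    /* Device-visible address of the descriptor ring. Only memory
     * from the rte_malloc family can be translated this way. */
    rte_iova_t ring_base = rte_malloc_virt2iova(queue->meta_q);
    if (ring_base == RTE_BAD_IOVA)
        return -1;      /* not DMA-addressable */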
336 struct ark_tx_queue *queue; in eth_ark_tx_queue_release() local
338 queue = (struct ark_tx_queue *)vtx_queue; in eth_ark_tx_queue_release()
340 ark_tx_hw_queue_stop(queue); in eth_ark_tx_queue_release()
342 queue->cons_index = queue->prod_index; in eth_ark_tx_queue_release()
343 free_completed_tx(queue); in eth_ark_tx_queue_release()
345 rte_free(queue->meta_q); in eth_ark_tx_queue_release()
346 rte_free(queue->bufs); in eth_ark_tx_queue_release()
347 rte_free(queue); in eth_ark_tx_queue_release()
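Release is a forced drain: after stopping the hardware, setting cons_index = prod_index (line 342) makes free_completed_tx treat every outstanding descriptor as completed, so all attached mbufs return to their pools before the meta ring, the buffer array, and the queue structure itself are freed in the reverse of setup order.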
354 struct ark_tx_queue *queue; in eth_ark_tx_queue_stop() local
357 queue = dev->data->tx_queues[queue_id]; in eth_ark_tx_queue_stop()
360 while (queue->cons_index != queue->prod_index) { in eth_ark_tx_queue_stop()
366 ark_mpu_stop(queue->mpu); in eth_ark_tx_queue_stop()
367 free_completed_tx(queue); in eth_ark_tx_queue_stop()
377 struct ark_tx_queue *queue; in eth_ark_tx_queue_start() local
379 queue = dev->data->tx_queues[queue_id]; in eth_ark_tx_queue_start()
383 ark_mpu_start(queue->mpu); in eth_ark_tx_queue_start()
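Stop and start are asymmetric: eth_ark_tx_queue_stop first waits for cons_index to catch up with prod_index, i.e. for hardware to finish every in-flight descriptor, before halting the MPU and reclaiming the last completions, while in the matched lines start simply re-enables the packet mover, suggesting the indices and ring contents are kept intact across a stop/start cycle.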
391 free_completed_tx(struct ark_tx_queue *queue) in free_completed_tx() argument
397 top_index = queue->cons_index; /* read once */ in free_completed_tx()
398 while (queue->free_index != top_index) { in free_completed_tx()
399 meta = &queue->meta_q[queue->free_index & queue->queue_mask]; in free_completed_tx()
400 mbuf = queue->bufs[queue->free_index & queue->queue_mask]; in free_completed_tx()
406 queue->free_index++; in free_completed_tx()
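Reclamation snapshots the hardware-written cons_index once into top_index (line 397) and then walks free_index up to it, freeing one mbuf per completed slot. The single read matters: the DDM updates cons_index asynchronously by DMA, so re-reading it inside the loop would chase a moving target and leave the per-call work unbounded. A sketch over the tx_ring above (reclaim_completed and hw_cons are illustrative names; rte_pktmbuf_free_seg() is the real per-segment free, though which free variant the driver uses is not visible in the matched lines):

    static void reclaim_completed(struct tx_ring *r,
                                  const volatile uint32_t *hw_cons)
    {
        uint32_t top = *hw_cons;       /* read once: stable snapshot */

        while (r->free != top) {
            struct rte_mbuf *m = r->slots[r->free & r->mask];
            rte_pktmbuf_free_seg(m);   /* return segment to its pool */
            r->free++;
        }
    }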
414 struct ark_tx_queue *queue; in eth_tx_queue_stats_get() local
418 queue = vqueue; in eth_tx_queue_stats_get()
419 ddm = queue->ddm; in eth_tx_queue_stats_get()
424 stats->q_opackets[queue->queue_index] = pkts; in eth_tx_queue_stats_get()
425 stats->q_obytes[queue->queue_index] = bytes; in eth_tx_queue_stats_get()
428 stats->oerrors += queue->tx_errors; in eth_tx_queue_stats_get()
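Note that the stats path indexes q_opackets/q_obytes by queue_index, the DPDK-visible queue number, not by phys_qid, the hardware queue used to address the MPU/DDM register blocks; the two are tracked separately in the queue structure (lines 245-246). The per-queue packet and byte counters come from the DDM, while tx_errors is the software-side count incremented in the transmit loop (line 129) and folded into oerrors.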
434 struct ark_tx_queue *queue; in eth_tx_queue_stats_reset() local
437 queue = vqueue; in eth_tx_queue_stats_reset()
438 ddm = queue->ddm; in eth_tx_queue_stats_reset()
441 queue->tx_errors = 0; in eth_tx_queue_stats_reset()