xref: /dpdk/drivers/net/liquidio/lio_rxtx.c (revision 52415c6b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4 
5 #include <ethdev_driver.h>
6 #include <rte_cycles.h>
7 #include <rte_malloc.h>
8 
9 #include "lio_logs.h"
10 #include "lio_struct.h"
11 #include "lio_ethdev.h"
12 #include "lio_rxtx.h"
13 
14 #define LIO_MAX_SG 12
15 /* Flush the IQ if the available tx descriptors fall below LIO_FLUSH_WM */
16 #define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
17 #define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
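/*
 * Example: with a 512-entry IQ, LIO_FLUSH_WM() evaluates to 256, i.e.
 * lio_dev_cleanup_iq() keeps flushing completed commands until at least
 * half of the descriptors are free again (or its retry budget runs out).
 * LIO_PKT_IN_DONE_CNT_MASK is assumed to isolate the low 32 bits of the
 * instruction-count register read in lio_update_read_index(), discarding
 * any upper control bits.
 */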
18 
19 static void
20 lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
21 {
22 	uint32_t count = 0;
23 
24 	do {
25 		count += droq->buffer_size;
26 	} while (count < LIO_MAX_RX_PKTLEN);
27 }
28 
29 static void
30 lio_droq_reset_indices(struct lio_droq *droq)
31 {
32 	droq->read_idx	= 0;
33 	droq->write_idx	= 0;
34 	droq->refill_idx = 0;
35 	droq->refill_count = 0;
36 	rte_atomic64_set(&droq->pkts_pending, 0);
37 }
38 
39 static void
40 lio_droq_destroy_ring_buffers(struct lio_droq *droq)
41 {
42 	uint32_t i;
43 
44 	for (i = 0; i < droq->nb_desc; i++) {
45 		if (droq->recv_buf_list[i].buffer) {
46 			rte_pktmbuf_free((struct rte_mbuf *)
47 					 droq->recv_buf_list[i].buffer);
48 			droq->recv_buf_list[i].buffer = NULL;
49 		}
50 	}
51 
52 	lio_droq_reset_indices(droq);
53 }
54 
55 static int
56 lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
57 			    struct lio_droq *droq)
58 {
59 	struct lio_droq_desc *desc_ring = droq->desc_ring;
60 	uint32_t i;
61 	void *buf;
62 
63 	for (i = 0; i < droq->nb_desc; i++) {
64 		buf = rte_pktmbuf_alloc(droq->mpool);
65 		if (buf == NULL) {
66 			lio_dev_err(lio_dev, "buffer alloc failed\n");
67 			droq->stats.rx_alloc_failure++;
68 			lio_droq_destroy_ring_buffers(droq);
69 			return -ENOMEM;
70 		}
71 
72 		droq->recv_buf_list[i].buffer = buf;
73 		droq->info_list[i].length = 0;
74 
75 		/* map ring buffers into memory */
76 		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
77 		desc_ring[i].buffer_ptr =
78 			lio_map_ring(droq->recv_buf_list[i].buffer);
79 	}
80 
81 	lio_droq_reset_indices(droq);
82 
83 	lio_droq_compute_max_packet_bufs(droq);
84 
85 	return 0;
86 }
87 
88 static void
89 lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
90 {
91 	const struct rte_memzone *mz_tmp;
92 	int ret = 0;
93 
94 	if (mz == NULL) {
95 		lio_dev_err(lio_dev, "Memzone NULL\n");
96 		return;
97 	}
98 
99 	mz_tmp = rte_memzone_lookup(mz->name);
100 	if (mz_tmp == NULL) {
101 		lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
102 		return;
103 	}
104 
105 	ret = rte_memzone_free(mz);
106 	if (ret)
107 		lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
108 }
109 
110 /**
111  *  Frees the space for the descriptor ring of the droq.
112  *
113  *  @param lio_dev	- pointer to the lio device structure
114  *  @param q_no		- droq no.
115  */
116 static void
117 lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
118 {
119 	struct lio_droq *droq = lio_dev->droq[q_no];
120 
121 	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
122 
123 	lio_droq_destroy_ring_buffers(droq);
124 	rte_free(droq->recv_buf_list);
125 	droq->recv_buf_list = NULL;
126 	lio_dma_zone_free(lio_dev, droq->info_mz);
127 	lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
128 
129 	memset(droq, 0, LIO_DROQ_SIZE);
130 }
131 
132 static void *
133 lio_alloc_info_buffer(struct lio_device *lio_dev,
134 		      struct lio_droq *droq, unsigned int socket_id)
135 {
136 	droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
137 						 "info_list", droq->q_no,
138 						 (droq->nb_desc *
139 							LIO_DROQ_INFO_SIZE),
140 						 RTE_CACHE_LINE_SIZE,
141 						 socket_id);
142 
143 	if (droq->info_mz == NULL)
144 		return NULL;
145 
146 	droq->info_list_dma = droq->info_mz->iova;
147 	droq->info_alloc_size = droq->info_mz->len;
148 	droq->info_base_addr = (size_t)droq->info_mz->addr;
149 
150 	return droq->info_mz->addr;
151 }
152 
153 /**
154  *  Allocates space for the descriptor ring of the droq and
155  *  sets the base address, number of descriptors, etc. in Octeon registers.
156  *
157  * @param lio_dev	- pointer to the lio device structure
158  * @param q_no		- droq no.
159  * @param mpool	- mempool used to allocate receive buffers
160  * @return Success: 0	Failure: -1
161  */
162 static int
163 lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
164 	      uint32_t num_descs, uint32_t desc_size,
165 	      struct rte_mempool *mpool, unsigned int socket_id)
166 {
167 	uint32_t c_refill_threshold;
168 	uint32_t desc_ring_size;
169 	struct lio_droq *droq;
170 
171 	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
172 
173 	droq = lio_dev->droq[q_no];
174 	droq->lio_dev = lio_dev;
175 	droq->q_no = q_no;
176 	droq->mpool = mpool;
177 
178 	c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
179 
180 	droq->nb_desc = num_descs;
181 	droq->buffer_size = desc_size;
182 
183 	desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
184 	droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
185 						      "droq", q_no,
186 						      desc_ring_size,
187 						      RTE_CACHE_LINE_SIZE,
188 						      socket_id);
189 
190 	if (droq->desc_ring_mz == NULL) {
191 		lio_dev_err(lio_dev,
192 			    "Output queue %d ring alloc failed\n", q_no);
193 		return -1;
194 	}
195 
196 	droq->desc_ring_dma = droq->desc_ring_mz->iova;
197 	droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
198 
199 	lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
200 		    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
201 	lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
202 		    droq->nb_desc);
203 
204 	droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
205 	if (droq->info_list == NULL) {
206 		lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
207 		goto init_droq_fail;
208 	}
209 
210 	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
211 						 (droq->nb_desc *
212 							LIO_DROQ_RECVBUF_SIZE),
213 						 RTE_CACHE_LINE_SIZE,
214 						 socket_id);
215 	if (droq->recv_buf_list == NULL) {
216 		lio_dev_err(lio_dev,
217 			    "Output queue recv buf list alloc failed\n");
218 		goto init_droq_fail;
219 	}
220 
221 	if (lio_droq_setup_ring_buffers(lio_dev, droq))
222 		goto init_droq_fail;
223 
224 	droq->refill_threshold = c_refill_threshold;
225 
226 	rte_spinlock_init(&droq->lock);
227 
228 	lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
229 
230 	lio_dev->io_qmask.oq |= (1ULL << q_no);
231 
232 	return 0;
233 
234 init_droq_fail:
235 	lio_delete_droq(lio_dev, q_no);
236 
237 	return -1;
238 }
239 
240 int
241 lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
242 	       int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
243 {
244 	struct lio_droq *droq;
245 
246 	PMD_INIT_FUNC_TRACE();
247 
248 	/* Allocate the DS for the new droq. */
249 	droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
250 				  RTE_CACHE_LINE_SIZE, socket_id);
251 	if (droq == NULL)
252 		return -ENOMEM;
253 
254 	lio_dev->droq[oq_no] = droq;
255 
256 	/* Initialize the Droq */
257 	if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
258 			  socket_id)) {
259 		lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
260 		rte_free(lio_dev->droq[oq_no]);
261 		lio_dev->droq[oq_no] = NULL;
262 		return -ENOMEM;
263 	}
264 
265 	lio_dev->num_oqs++;
266 
267 	lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
268 
269 	/* Send credit for Octeon output queues. Credits are always
270 	 * sent after the output queue is enabled.
271 	 */
272 	rte_write32(lio_dev->droq[oq_no]->nb_desc,
273 		    lio_dev->droq[oq_no]->pkts_credit_reg);
274 	rte_wmb();
275 
276 	return 0;
277 }
278 
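/*
 * Ceiling division by repeated comparison: e.g. for buf_size = 2048 and
 * total_len = 5000 the loop below returns 3, since two buffers (4096 bytes)
 * cannot hold the packet.
 */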
279 static inline uint32_t
280 lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
281 {
282 	uint32_t buf_cnt = 0;
283 
284 	while (total_len > (buf_size * buf_cnt))
285 		buf_cnt++;
286 
287 	return buf_cnt;
288 }
289 
290 /* If we were not able to refill all buffers, try to move around
291  * the buffers that were not dispatched.
292  */
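/*
 * lio_incr_index() (presumably provided by lio_rxtx.h) is assumed to be a
 * modular increment, roughly (index + count) % max, so the ring indices
 * used throughout this file wrap at nb_desc.
 */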
293 static inline uint32_t
294 lio_droq_refill_pullup_descs(struct lio_droq *droq,
295 			     struct lio_droq_desc *desc_ring)
296 {
297 	uint32_t refill_index = droq->refill_idx;
298 	uint32_t desc_refilled = 0;
299 
300 	while (refill_index != droq->read_idx) {
301 		if (droq->recv_buf_list[refill_index].buffer) {
302 			droq->recv_buf_list[droq->refill_idx].buffer =
303 				droq->recv_buf_list[refill_index].buffer;
304 			desc_ring[droq->refill_idx].buffer_ptr =
305 				desc_ring[refill_index].buffer_ptr;
306 			droq->recv_buf_list[refill_index].buffer = NULL;
307 			desc_ring[refill_index].buffer_ptr = 0;
308 			do {
309 				droq->refill_idx = lio_incr_index(
310 							droq->refill_idx, 1,
311 							droq->nb_desc);
312 				desc_refilled++;
313 				droq->refill_count--;
314 			} while (droq->recv_buf_list[droq->refill_idx].buffer);
315 		}
316 		refill_index = lio_incr_index(refill_index, 1,
317 					      droq->nb_desc);
318 	}	/* while */
319 
320 	return desc_refilled;
321 }
322 
323 /* lio_droq_refill
324  *
325  * @param droq		- droq in which descriptors require new buffers.
326  *
327  * Description:
328  *  Called during normal DROQ processing in interrupt mode or by the poll
329  *  thread to refill the descriptors from which buffers were dispatched
330  *  to upper layers. Attempts to allocate new buffers. If that fails, moves
331  *  up buffers (that were not dispatched) to form a contiguous ring.
332  *
333  * Returns:
334  *  No of descriptors refilled.
335  *
336  * Locks:
337  * This routine is called with droq->lock held.
338  */
339 static uint32_t
340 lio_droq_refill(struct lio_droq *droq)
341 {
342 	struct lio_droq_desc *desc_ring;
343 	uint32_t desc_refilled = 0;
344 	void *buf = NULL;
345 
346 	desc_ring = droq->desc_ring;
347 
348 	while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
349 		/* If a valid buffer exists (happens if there is no dispatch),
350 		 * reuse the buffer, else allocate.
351 		 */
352 		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
353 			buf = rte_pktmbuf_alloc(droq->mpool);
354 			/* If a buffer could not be allocated, no point in
355 			 * continuing
356 			 */
357 			if (buf == NULL) {
358 				droq->stats.rx_alloc_failure++;
359 				break;
360 			}
361 
362 			droq->recv_buf_list[droq->refill_idx].buffer = buf;
363 		}
364 
365 		desc_ring[droq->refill_idx].buffer_ptr =
366 		    lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
367 		/* Reset any previous values in the length field. */
368 		droq->info_list[droq->refill_idx].length = 0;
369 
370 		droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
371 						  droq->nb_desc);
372 		desc_refilled++;
373 		droq->refill_count--;
374 	}
375 
376 	if (droq->refill_count)
377 		desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
378 
379 	/* If droq->refill_count is still non-zero here:
380 	 * the refill count would not change in pass two. We only moved buffers
381 	 * to close the gap in the ring, but we would still have the same no. of
382 	 * buffers to refill.
383 	 */
384 	return desc_refilled;
385 }
386 
387 static int
388 lio_droq_fast_process_packet(struct lio_device *lio_dev,
389 			     struct lio_droq *droq,
390 			     struct rte_mbuf **rx_pkts)
391 {
392 	struct rte_mbuf *nicbuf = NULL;
393 	struct lio_droq_info *info;
394 	uint32_t total_len = 0;
395 	int data_total_len = 0;
396 	uint32_t pkt_len = 0;
397 	union octeon_rh *rh;
398 	int data_pkts = 0;
399 
400 	info = &droq->info_list[droq->read_idx];
401 	lio_swap_8B_data((uint64_t *)info, 2);
402 
403 	if (!info->length)
404 		return -1;
405 
406 	/* Len of resp hdr is included in the received data len. */
407 	info->length -= OCTEON_RH_SIZE;
408 	rh = &info->rh;
409 
410 	total_len += (uint32_t)info->length;
411 
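	/*
	 * Receive-header layout assumption: rh->r_dh carries per-packet flags
	 * such as has_hash and csum_verified.  When has_hash is set, the
	 * firmware prepends an 8-byte RSS hash to the packet data; the code
	 * below strips it by advancing data_off and reducing the reported
	 * length by 8.
	 */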
412 	if (lio_opcode_slow_path(rh)) {
413 		uint32_t buf_cnt;
414 
415 		buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
416 						(uint32_t)info->length);
417 		droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
418 						droq->nb_desc);
419 		droq->refill_count += buf_cnt;
420 	} else {
421 		if (info->length <= droq->buffer_size) {
422 			if (rh->r_dh.has_hash)
423 				pkt_len = (uint32_t)(info->length - 8);
424 			else
425 				pkt_len = (uint32_t)info->length;
426 
427 			nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
428 			droq->recv_buf_list[droq->read_idx].buffer = NULL;
429 			droq->read_idx = lio_incr_index(
430 						droq->read_idx, 1,
431 						droq->nb_desc);
432 			droq->refill_count++;
433 
434 			if (likely(nicbuf != NULL)) {
435 				/* We don't have a way to pass flags yet */
436 				nicbuf->ol_flags = 0;
437 				if (rh->r_dh.has_hash) {
438 					uint64_t *hash_ptr;
439 
440 					nicbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
441 					hash_ptr = rte_pktmbuf_mtod(nicbuf,
442 								    uint64_t *);
443 					lio_swap_8B_data(hash_ptr, 1);
444 					nicbuf->hash.rss = (uint32_t)*hash_ptr;
445 					nicbuf->data_off += 8;
446 				}
447 
448 				nicbuf->pkt_len = pkt_len;
449 				nicbuf->data_len = pkt_len;
450 				nicbuf->port = lio_dev->port_id;
451 				/* Store the mbuf */
452 				rx_pkts[data_pkts++] = nicbuf;
453 				data_total_len += pkt_len;
454 			}
455 
456 			/* Prefetch buffer pointers when on a cache line
457 			 * boundary
458 			 */
459 			if ((droq->read_idx & 3) == 0) {
460 				rte_prefetch0(
461 				    &droq->recv_buf_list[droq->read_idx]);
462 				rte_prefetch0(
463 				    &droq->info_list[droq->read_idx]);
464 			}
465 		} else {
466 			struct rte_mbuf *first_buf = NULL;
467 			struct rte_mbuf *last_buf = NULL;
468 
469 			while (pkt_len < info->length) {
470 				int cpy_len = 0;
471 
472 				cpy_len = ((pkt_len + droq->buffer_size) >
473 						info->length)
474 						? ((uint32_t)info->length -
475 							pkt_len)
476 						: droq->buffer_size;
477 
478 				nicbuf =
479 				    droq->recv_buf_list[droq->read_idx].buffer;
480 				droq->recv_buf_list[droq->read_idx].buffer =
481 				    NULL;
482 
483 				if (likely(nicbuf != NULL)) {
484 					/* Note the first seg */
485 					if (!pkt_len)
486 						first_buf = nicbuf;
487 
488 					nicbuf->port = lio_dev->port_id;
489 					/* We don't have a way to pass
490 					 * flags yet
491 					 */
492 					nicbuf->ol_flags = 0;
493 					if ((!pkt_len) && (rh->r_dh.has_hash)) {
494 						uint64_t *hash_ptr;
495 
496 						nicbuf->ol_flags |=
497 						    RTE_MBUF_F_RX_RSS_HASH;
498 						hash_ptr = rte_pktmbuf_mtod(
499 						    nicbuf, uint64_t *);
500 						lio_swap_8B_data(hash_ptr, 1);
501 						nicbuf->hash.rss =
502 						    (uint32_t)*hash_ptr;
503 						nicbuf->data_off += 8;
504 						nicbuf->pkt_len = cpy_len - 8;
505 						nicbuf->data_len = cpy_len - 8;
506 					} else {
507 						nicbuf->pkt_len = cpy_len;
508 						nicbuf->data_len = cpy_len;
509 					}
510 
511 					if (pkt_len)
512 						first_buf->nb_segs++;
513 
514 					if (last_buf)
515 						last_buf->next = nicbuf;
516 
517 					last_buf = nicbuf;
518 				} else {
519 					PMD_RX_LOG(lio_dev, ERR, "no buf\n");
520 				}
521 
522 				pkt_len += cpy_len;
523 				droq->read_idx = lio_incr_index(
524 							droq->read_idx,
525 							1, droq->nb_desc);
526 				droq->refill_count++;
527 
528 				/* Prefetch buffer pointers when on a
529 				 * cache line boundary
530 				 */
531 				if ((droq->read_idx & 3) == 0) {
532 					rte_prefetch0(&droq->recv_buf_list
533 							      [droq->read_idx]);
534 
535 					rte_prefetch0(
536 					    &droq->info_list[droq->read_idx]);
537 				}
538 			}
539 			rx_pkts[data_pkts++] = first_buf;
540 			if (rh->r_dh.has_hash)
541 				data_total_len += (pkt_len - 8);
542 			else
543 				data_total_len += pkt_len;
544 		}
545 
546 		/* Inform upper layer about packet checksum verification */
547 		struct rte_mbuf *m = rx_pkts[data_pkts - 1];
548 
549 		if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
550 			m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
551 
552 		if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
553 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
554 	}
555 
556 	if (droq->refill_count >= droq->refill_threshold) {
557 		int desc_refilled = lio_droq_refill(droq);
558 
559 		/* Flush the droq descriptor data to memory to be sure
560 		 * that when we update the credits the data in memory is
561 		 * accurate.
562 		 */
563 		rte_wmb();
564 		rte_write32(desc_refilled, droq->pkts_credit_reg);
565 		/* make sure mmio write completes */
566 		rte_wmb();
567 	}
568 
569 	info->length = 0;
570 	info->rh.rh64 = 0;
571 
572 	droq->stats.pkts_received++;
573 	droq->stats.rx_pkts_received += data_pkts;
574 	droq->stats.rx_bytes_received += data_total_len;
575 	droq->stats.bytes_received += total_len;
576 
577 	return data_pkts;
578 }
579 
580 static uint32_t
581 lio_droq_fast_process_packets(struct lio_device *lio_dev,
582 			      struct lio_droq *droq,
583 			      struct rte_mbuf **rx_pkts,
584 			      uint32_t pkts_to_process)
585 {
586 	int ret, data_pkts = 0;
587 	uint32_t pkt;
588 
589 	for (pkt = 0; pkt < pkts_to_process; pkt++) {
590 		ret = lio_droq_fast_process_packet(lio_dev, droq,
591 						   &rx_pkts[data_pkts]);
592 		if (ret < 0) {
593 			lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
594 				    lio_dev->port_id, droq->q_no,
595 				    droq->read_idx, pkts_to_process);
596 			break;
597 		}
598 		data_pkts += ret;
599 	}
600 
601 	rte_atomic64_sub(&droq->pkts_pending, pkt);
602 
603 	return data_pkts;
604 }
605 
606 static inline uint32_t
607 lio_droq_check_hw_for_pkts(struct lio_droq *droq)
608 {
609 	uint32_t last_count;
610 	uint32_t pkt_count;
611 
612 	pkt_count = rte_read32(droq->pkts_sent_reg);
613 
614 	last_count = pkt_count - droq->pkt_count;
615 	droq->pkt_count = pkt_count;
616 
617 	if (last_count)
618 		rte_atomic64_add(&droq->pkts_pending, last_count);
619 
620 	return last_count;
621 }
622 
623 uint16_t
624 lio_dev_recv_pkts(void *rx_queue,
625 		  struct rte_mbuf **rx_pkts,
626 		  uint16_t budget)
627 {
628 	struct lio_droq *droq = rx_queue;
629 	struct lio_device *lio_dev = droq->lio_dev;
630 	uint32_t pkts_processed = 0;
631 	uint32_t pkt_count = 0;
632 
633 	lio_droq_check_hw_for_pkts(droq);
634 
635 	pkt_count = rte_atomic64_read(&droq->pkts_pending);
636 	if (!pkt_count)
637 		return 0;
638 
639 	if (pkt_count > budget)
640 		pkt_count = budget;
641 
642 	/* Grab the lock */
643 	rte_spinlock_lock(&droq->lock);
644 	pkts_processed = lio_droq_fast_process_packets(lio_dev,
645 						       droq, rx_pkts,
646 						       pkt_count);
647 
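	/*
	 * Writing the accumulated count back to pkts_sent_reg is assumed to
	 * subtract it from the hardware packet counter, which is why the
	 * software copy in droq->pkt_count is zeroed in the same step.
	 */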
648 	if (droq->pkt_count) {
649 		rte_write32(droq->pkt_count, droq->pkts_sent_reg);
650 		droq->pkt_count = 0;
651 	}
652 
653 	/* Release the spin lock */
654 	rte_spinlock_unlock(&droq->lock);
655 
656 	return pkts_processed;
657 }
658 
659 void
660 lio_delete_droq_queue(struct lio_device *lio_dev,
661 		      int oq_no)
662 {
663 	lio_delete_droq(lio_dev, oq_no);
664 	lio_dev->num_oqs--;
665 	rte_free(lio_dev->droq[oq_no]);
666 	lio_dev->droq[oq_no] = NULL;
667 }
668 
669 /**
670  *  lio_init_instr_queue()
671  *  @param lio_dev	- pointer to the lio device structure.
672  *  @param txpciq	- queue to be initialized.
673  *
674  *  Called at driver init time for each input queue. iq_conf has the
675  *  configuration parameters for the queue.
676  *
677  *  @return  Success: 0	Failure: -1
678  */
679 static int
680 lio_init_instr_queue(struct lio_device *lio_dev,
681 		     union octeon_txpciq txpciq,
682 		     uint32_t num_descs, unsigned int socket_id)
683 {
684 	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
685 	struct lio_instr_queue *iq;
686 	uint32_t instr_type;
687 	uint32_t q_size;
688 
689 	instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
690 
691 	q_size = instr_type * num_descs;
692 	iq = lio_dev->instr_queue[iq_no];
693 	iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
694 					     "instr_queue", iq_no, q_size,
695 					     RTE_CACHE_LINE_SIZE,
696 					     socket_id);
697 	if (iq->iq_mz == NULL) {
698 		lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
699 			    iq_no);
700 		return -1;
701 	}
702 
703 	iq->base_addr_dma = iq->iq_mz->iova;
704 	iq->base_addr = (uint8_t *)iq->iq_mz->addr;
705 
706 	iq->nb_desc = num_descs;
707 
708 	/* Initialize a list to hold requests that have been posted to Octeon
709 	 * but have yet to be fetched by Octeon.
710 	 */
711 	iq->request_list = rte_zmalloc_socket("request_list",
712 					      sizeof(*iq->request_list) *
713 							num_descs,
714 					      RTE_CACHE_LINE_SIZE,
715 					      socket_id);
716 	if (iq->request_list == NULL) {
717 		lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
718 			    iq_no);
719 		lio_dma_zone_free(lio_dev, iq->iq_mz);
720 		return -1;
721 	}
722 
723 	lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
724 		    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
725 		    iq->nb_desc);
726 
727 	iq->lio_dev = lio_dev;
728 	iq->txpciq.txpciq64 = txpciq.txpciq64;
729 	iq->fill_cnt = 0;
730 	iq->host_write_index = 0;
731 	iq->lio_read_index = 0;
732 	iq->flush_index = 0;
733 
734 	rte_atomic64_set(&iq->instr_pending, 0);
735 
736 	/* Initialize the spinlock for this instruction queue */
737 	rte_spinlock_init(&iq->lock);
738 	rte_spinlock_init(&iq->post_lock);
739 
740 	rte_atomic64_clear(&iq->iq_flush_running);
741 
742 	lio_dev->io_qmask.iq |= (1ULL << iq_no);
743 
744 	/* Set the 32B/64B mode for each input queue */
745 	lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
746 	iq->iqcmd_64B = (instr_type == 64);
747 
748 	lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
749 
750 	return 0;
751 }
752 
753 int
754 lio_setup_instr_queue0(struct lio_device *lio_dev)
755 {
756 	union octeon_txpciq txpciq;
757 	uint32_t num_descs = 0;
758 	uint32_t iq_no = 0;
759 
760 	num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
761 
762 	lio_dev->num_iqs = 0;
763 
764 	lio_dev->instr_queue[0] = rte_zmalloc(NULL,
765 					sizeof(struct lio_instr_queue), 0);
766 	if (lio_dev->instr_queue[0] == NULL)
767 		return -ENOMEM;
768 
769 	lio_dev->instr_queue[0]->q_index = 0;
770 	lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
771 	txpciq.txpciq64 = 0;
772 	txpciq.s.q_no = iq_no;
773 	txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
774 	txpciq.s.use_qpg = 0;
775 	txpciq.s.qpg = 0;
776 	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
777 		rte_free(lio_dev->instr_queue[0]);
778 		lio_dev->instr_queue[0] = NULL;
779 		return -1;
780 	}
781 
782 	lio_dev->num_iqs++;
783 
784 	return 0;
785 }
786 
787 /**
788  *  lio_delete_instr_queue()
789  *  @param lio_dev	- pointer to the lio device structure.
790  *  @param iq_no	- queue to be deleted.
791  *
792  *  Called at driver unload time for each input queue. Deletes all
793  *  allocated resources for the input queue.
794  */
795 static void
796 lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
797 {
798 	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
799 
800 	rte_free(iq->request_list);
801 	iq->request_list = NULL;
802 	lio_dma_zone_free(lio_dev, iq->iq_mz);
803 }
804 
805 void
806 lio_free_instr_queue0(struct lio_device *lio_dev)
807 {
808 	lio_delete_instr_queue(lio_dev, 0);
809 	rte_free(lio_dev->instr_queue[0]);
810 	lio_dev->instr_queue[0] = NULL;
811 	lio_dev->num_iqs--;
812 }
813 
814 /* Return 0 on success, -1 on failure */
815 int
816 lio_setup_iq(struct lio_device *lio_dev, int q_index,
817 	     union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
818 	     unsigned int socket_id)
819 {
820 	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
821 
822 	lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
823 						sizeof(struct lio_instr_queue),
824 						RTE_CACHE_LINE_SIZE, socket_id);
825 	if (lio_dev->instr_queue[iq_no] == NULL)
826 		return -1;
827 
828 	lio_dev->instr_queue[iq_no]->q_index = q_index;
829 	lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
830 
831 	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
832 		rte_free(lio_dev->instr_queue[iq_no]);
833 		lio_dev->instr_queue[iq_no] = NULL;
834 		return -1;
835 	}
836 
837 	lio_dev->num_iqs++;
838 
839 	return 0;
840 }
841 
842 int
843 lio_wait_for_instr_fetch(struct lio_device *lio_dev)
844 {
845 	int pending, instr_cnt;
846 	int i, retry = 1000;
847 
848 	do {
849 		instr_cnt = 0;
850 
851 		for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
852 			if (!(lio_dev->io_qmask.iq & (1ULL << i)))
853 				continue;
854 
855 			if (lio_dev->instr_queue[i] == NULL)
856 				break;
857 
858 			pending = rte_atomic64_read(
859 			    &lio_dev->instr_queue[i]->instr_pending);
860 			if (pending)
861 				lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
862 
863 			instr_cnt += pending;
864 		}
865 
866 		if (instr_cnt == 0)
867 			break;
868 
869 		rte_delay_ms(1);
870 
871 	} while (retry-- && instr_cnt);
872 
873 	return instr_cnt;
874 }
875 
876 static inline void
877 lio_ring_doorbell(struct lio_device *lio_dev,
878 		  struct lio_instr_queue *iq)
879 {
880 	if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
881 		rte_write32(iq->fill_cnt, iq->doorbell_reg);
882 		/* make sure doorbell write goes through */
883 		rte_wmb();
884 		iq->fill_cnt = 0;
885 	}
886 }
887 
888 static inline void
889 copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
890 {
891 	uint8_t *iqptr, cmdsize;
892 
893 	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
894 	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
895 
896 	rte_memcpy(iqptr, cmd, cmdsize);
897 }
898 
899 static inline struct lio_iq_post_status
900 post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
901 {
902 	struct lio_iq_post_status st;
903 
904 	st.status = LIO_IQ_SEND_OK;
905 
906 	/* This ensures that the read index does not wrap around to the same
907 	 * position if the queue gets full before Octeon can fetch any instruction.
908 	 */
909 	if (rte_atomic64_read(&iq->instr_pending) >=
910 			(int32_t)(iq->nb_desc - 1)) {
911 		st.status = LIO_IQ_SEND_FAILED;
912 		st.index = -1;
913 		return st;
914 	}
915 
916 	if (rte_atomic64_read(&iq->instr_pending) >=
917 			(int32_t)(iq->nb_desc - 2))
918 		st.status = LIO_IQ_SEND_STOP;
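	/*
	 * Example: with nb_desc = 128 a command is rejected once 127 are
	 * already pending, and LIO_IQ_SEND_STOP is returned once 126 are
	 * pending, so at least one descriptor always stays unused between the
	 * host write index and Octeon's read index.
	 */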
919 
920 	copy_cmd_into_iq(iq, cmd);
921 
922 	/* "index" is returned, host_write_index is modified. */
923 	st.index = iq->host_write_index;
924 	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
925 					      iq->nb_desc);
926 	iq->fill_cnt++;
927 
928 	/* Flush the command into memory. We need to be sure the data is in
929 	 * memory before indicating that the instruction is pending.
930 	 */
931 	rte_wmb();
932 
933 	rte_atomic64_inc(&iq->instr_pending);
934 
935 	return st;
936 }
937 
938 static inline void
939 lio_add_to_request_list(struct lio_instr_queue *iq,
940 			int idx, void *buf, int reqtype)
941 {
942 	iq->request_list[idx].buf = buf;
943 	iq->request_list[idx].reqtype = reqtype;
944 }
945 
946 static inline void
947 lio_free_netsgbuf(void *buf)
948 {
949 	struct lio_buf_free_info *finfo = buf;
950 	struct lio_device *lio_dev = finfo->lio_dev;
951 	struct rte_mbuf *m = finfo->mbuf;
952 	struct lio_gather *g = finfo->g;
953 	uint8_t iq = finfo->iq_no;
954 
955 	/* This will take care of multiple segments also */
956 	rte_pktmbuf_free(m);
957 
958 	rte_spinlock_lock(&lio_dev->glist_lock[iq]);
959 	STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
960 	rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
961 	rte_free(finfo);
962 }
963 
964 /* Can only run in process context */
965 static int
966 lio_process_iq_request_list(struct lio_device *lio_dev,
967 			    struct lio_instr_queue *iq)
968 {
969 	struct octeon_instr_irh *irh = NULL;
970 	uint32_t old = iq->flush_index;
971 	struct lio_soft_command *sc;
972 	uint32_t inst_count = 0;
973 	int reqtype;
974 	void *buf;
975 
976 	while (old != iq->lio_read_index) {
977 		reqtype = iq->request_list[old].reqtype;
978 		buf     = iq->request_list[old].buf;
979 
980 		if (reqtype == LIO_REQTYPE_NONE)
981 			goto skip_this;
982 
983 		switch (reqtype) {
984 		case LIO_REQTYPE_NORESP_NET:
985 			rte_pktmbuf_free((struct rte_mbuf *)buf);
986 			break;
987 		case LIO_REQTYPE_NORESP_NET_SG:
988 			lio_free_netsgbuf(buf);
989 			break;
990 		case LIO_REQTYPE_SOFT_COMMAND:
991 			sc = buf;
992 			irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
993 			if (irh->rflag) {
994 				/* We're expecting a response from Octeon.
995 				 * It's up to lio_process_ordered_list() to
996 				 * process sc. Add sc to the ordered soft
997 				 * command response list because we expect
998 				 * a response from Octeon.
999 				 */
1000 				rte_spinlock_lock(&lio_dev->response_list.lock);
1001 				rte_atomic64_inc(
1002 				    &lio_dev->response_list.pending_req_count);
1003 				STAILQ_INSERT_TAIL(
1004 					&lio_dev->response_list.head,
1005 					&sc->node, entries);
1006 				rte_spinlock_unlock(
1007 						&lio_dev->response_list.lock);
1008 			} else {
1009 				if (sc->callback) {
1010 					/* This callback must not sleep */
1011 					sc->callback(LIO_REQUEST_DONE,
1012 						     sc->callback_arg);
1013 				}
1014 			}
1015 			break;
1016 		default:
1017 			lio_dev_err(lio_dev,
1018 				    "Unknown reqtype: %d buf: %p at idx %d\n",
1019 				    reqtype, buf, old);
1020 		}
1021 
1022 		iq->request_list[old].buf = NULL;
1023 		iq->request_list[old].reqtype = 0;
1024 
1025 skip_this:
1026 		inst_count++;
1027 		old = lio_incr_index(old, 1, iq->nb_desc);
1028 	}
1029 
1030 	iq->flush_index = old;
1031 
1032 	return inst_count;
1033 }
1034 
1035 static void
1036 lio_update_read_index(struct lio_instr_queue *iq)
1037 {
1038 	uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
1039 	uint32_t last_done;
1040 
1041 	last_done = pkt_in_done - iq->pkt_in_done;
1042 	iq->pkt_in_done = pkt_in_done;
1043 
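	/*
	 * The subtraction above uses unsigned 32-bit arithmetic, so it stays
	 * correct across hardware counter wrap: e.g. pkt_in_done = 0x10 with
	 * a previous value of 0xfffffff0 still yields last_done = 0x20.
	 */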
1044 	/* Add last_done and modulo with the IQ size to get new index */
1045 	iq->lio_read_index = (iq->lio_read_index +
1046 			(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
1047 			iq->nb_desc;
1048 }
1049 
1050 int
1051 lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
1052 {
1053 	uint32_t inst_processed = 0;
1054 	int tx_done = 1;
1055 
1056 	if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
1057 		return tx_done;
1058 
1059 	rte_spinlock_lock(&iq->lock);
1060 
1061 	lio_update_read_index(iq);
1062 
1063 	do {
1064 		/* Process any outstanding IQ packets. */
1065 		if (iq->flush_index == iq->lio_read_index)
1066 			break;
1067 
1068 		inst_processed = lio_process_iq_request_list(lio_dev, iq);
1069 
1070 		if (inst_processed) {
1071 			rte_atomic64_sub(&iq->instr_pending, inst_processed);
1072 			iq->stats.instr_processed += inst_processed;
1073 		}
1074 
1075 		inst_processed = 0;
1076 
1077 	} while (1);
1078 
1079 	rte_spinlock_unlock(&iq->lock);
1080 
1081 	rte_atomic64_clear(&iq->iq_flush_running);
1082 
1083 	return tx_done;
1084 }
1085 
1086 static int
1087 lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
1088 		 void *buf, uint32_t datasize, uint32_t reqtype)
1089 {
1090 	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
1091 	struct lio_iq_post_status st;
1092 
1093 	rte_spinlock_lock(&iq->post_lock);
1094 
1095 	st = post_command2(iq, cmd);
1096 
1097 	if (st.status != LIO_IQ_SEND_FAILED) {
1098 		lio_add_to_request_list(iq, st.index, buf, reqtype);
1099 		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
1100 					      datasize);
1101 		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
1102 
1103 		lio_ring_doorbell(lio_dev, iq);
1104 	} else {
1105 		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
1106 	}
1107 
1108 	rte_spinlock_unlock(&iq->post_lock);
1109 
1110 	return st.status;
1111 }
1112 
1113 void
1114 lio_prepare_soft_command(struct lio_device *lio_dev,
1115 			 struct lio_soft_command *sc, uint8_t opcode,
1116 			 uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
1117 			 uint64_t ossp1)
1118 {
1119 	struct octeon_instr_pki_ih3 *pki_ih3;
1120 	struct octeon_instr_ih3 *ih3;
1121 	struct octeon_instr_irh *irh;
1122 	struct octeon_instr_rdp *rdp;
1123 
1124 	RTE_ASSERT(opcode <= 15);
1125 	RTE_ASSERT(subcode <= 127);
1126 
1127 	ih3	  = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1128 
1129 	ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
1130 
1131 	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
1132 
1133 	pki_ih3->w	= 1;
1134 	pki_ih3->raw	= 1;
1135 	pki_ih3->utag	= 1;
1136 	pki_ih3->uqpg	= lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
1137 	pki_ih3->utt	= 1;
1138 
1139 	pki_ih3->tag	= LIO_CONTROL;
1140 	pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
1141 	pki_ih3->qpg	= lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
1142 	pki_ih3->pm	= 0x7;
1143 	pki_ih3->sl	= 8;
1144 
1145 	if (sc->datasize)
1146 		ih3->dlengsz = sc->datasize;
1147 
1148 	irh		= (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1149 	irh->opcode	= opcode;
1150 	irh->subcode	= subcode;
1151 
1152 	/* opcode/subcode specific parameters (ossp) */
1153 	irh->ossp = irh_ossp;
1154 	sc->cmd.cmd3.ossp[0] = ossp0;
1155 	sc->cmd.cmd3.ossp[1] = ossp1;
1156 
1157 	if (sc->rdatasize) {
1158 		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
1159 		rdp->pcie_port = lio_dev->pcie_port;
1160 		rdp->rlen      = sc->rdatasize;
1161 		irh->rflag = 1;
1162 		/* PKI IH3 */
1163 		ih3->fsz    = OCTEON_SOFT_CMD_RESP_IH3;
1164 	} else {
1165 		irh->rflag = 0;
1166 		/* PKI IH3 */
1167 		ih3->fsz    = OCTEON_PCI_CMD_O3;
1168 	}
1169 }
1170 
1171 int
1172 lio_send_soft_command(struct lio_device *lio_dev,
1173 		      struct lio_soft_command *sc)
1174 {
1175 	struct octeon_instr_ih3 *ih3;
1176 	struct octeon_instr_irh *irh;
1177 	uint32_t len = 0;
1178 
1179 	ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1180 	if (ih3->dlengsz) {
1181 		RTE_ASSERT(sc->dmadptr);
1182 		sc->cmd.cmd3.dptr = sc->dmadptr;
1183 	}
1184 
1185 	irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1186 	if (irh->rflag) {
1187 		RTE_ASSERT(sc->dmarptr);
1188 		RTE_ASSERT(sc->status_word != NULL);
1189 		*sc->status_word = LIO_COMPLETION_WORD_INIT;
1190 		sc->cmd.cmd3.rptr = sc->dmarptr;
1191 	}
1192 
1193 	len = (uint32_t)ih3->dlengsz;
1194 
1195 	if (sc->wait_time)
1196 		sc->timeout = lio_uptime + sc->wait_time;
1197 
1198 	return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
1199 				LIO_REQTYPE_SOFT_COMMAND);
1200 }
1201 
1202 int
1203 lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
1204 {
1205 	char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
1206 	uint16_t buf_size;
1207 
1208 	buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
1209 	snprintf(sc_pool_name, sizeof(sc_pool_name),
1210 		 "lio_sc_pool_%u", lio_dev->port_id);
1211 	lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
1212 						LIO_MAX_SOFT_COMMAND_BUFFERS,
1213 						0, 0, buf_size, SOCKET_ID_ANY);
1214 	return 0;
1215 }
1216 
1217 void
1218 lio_free_sc_buffer_pool(struct lio_device *lio_dev)
1219 {
1220 	rte_mempool_free(lio_dev->sc_buf_pool);
1221 }
1222 
1223 struct lio_soft_command *
1224 lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
1225 		       uint32_t rdatasize, uint32_t ctxsize)
1226 {
1227 	uint32_t offset = sizeof(struct lio_soft_command);
1228 	struct lio_soft_command *sc;
1229 	struct rte_mbuf *m;
1230 	uint64_t dma_addr;
1231 
1232 	RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
1233 		   LIO_SOFT_COMMAND_BUFFER_SIZE);
1234 
1235 	m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
1236 	if (m == NULL) {
1237 		lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
1238 		return NULL;
1239 	}
1240 
1241 	/* set rte_mbuf data size and there is only 1 segment */
1242 	m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1243 	m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1244 
1245 	/* use rte_mbuf buffer for soft command */
1246 	sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
1247 	memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
1248 	sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
1249 	sc->dma_addr = rte_mbuf_data_iova(m);
1250 	sc->mbuf = m;
1251 
1252 	dma_addr = sc->dma_addr;
1253 
1254 	if (ctxsize) {
1255 		sc->ctxptr = (uint8_t *)sc + offset;
1256 		sc->ctxsize = ctxsize;
1257 	}
1258 
1259 	/* Start data at 128 byte boundary */
1260 	offset = (offset + ctxsize + 127) & 0xffffff80;
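	/*
	 * (x + 127) & 0xffffff80 rounds x up to the next multiple of 128
	 * (e.g. 200 becomes 256).  The same rounding is applied again below,
	 * so dmadptr and dmarptr both fall on 128-byte offsets from the start
	 * of the soft-command buffer.
	 */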
1261 
1262 	if (datasize) {
1263 		sc->virtdptr = (uint8_t *)sc + offset;
1264 		sc->dmadptr = dma_addr + offset;
1265 		sc->datasize = datasize;
1266 	}
1267 
1268 	/* Start rdata at 128 byte boundary */
1269 	offset = (offset + datasize + 127) & 0xffffff80;
1270 
1271 	if (rdatasize) {
1272 		RTE_ASSERT(rdatasize >= 16);
1273 		sc->virtrptr = (uint8_t *)sc + offset;
1274 		sc->dmarptr = dma_addr + offset;
1275 		sc->rdatasize = rdatasize;
1276 		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
1277 					       rdatasize - 8);
1278 	}
1279 
1280 	return sc;
1281 }
1282 
1283 void
1284 lio_free_soft_command(struct lio_soft_command *sc)
1285 {
1286 	rte_pktmbuf_free(sc->mbuf);
1287 }
1288 
1289 void
1290 lio_setup_response_list(struct lio_device *lio_dev)
1291 {
1292 	STAILQ_INIT(&lio_dev->response_list.head);
1293 	rte_spinlock_init(&lio_dev->response_list.lock);
1294 	rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
1295 }
1296 
1297 int
1298 lio_process_ordered_list(struct lio_device *lio_dev)
1299 {
1300 	int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
1301 	struct lio_response_list *ordered_sc_list;
1302 	struct lio_soft_command *sc;
1303 	int request_complete = 0;
1304 	uint64_t status64;
1305 	uint32_t status;
1306 
1307 	ordered_sc_list = &lio_dev->response_list;
1308 
1309 	do {
1310 		rte_spinlock_lock(&ordered_sc_list->lock);
1311 
1312 		if (STAILQ_EMPTY(&ordered_sc_list->head)) {
1313 			/* ordered_sc_list is empty; there is
1314 			 * nothing to process
1315 			 */
1316 			rte_spinlock_unlock(&ordered_sc_list->lock);
1317 			return -1;
1318 		}
1319 
1320 		sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
1321 					     struct lio_soft_command, node);
1322 
1323 		status = LIO_REQUEST_PENDING;
1324 
1325 		/* check if Octeon has finished DMA'ing a response
1326 		 * to the location rptr points to
1327 		 */
1328 		status64 = *sc->status_word;
1329 
1330 		if (status64 != LIO_COMPLETION_WORD_INIT) {
1331 			/* This logic ensures that all 64b have been written.
1332 			 * 1. check byte 0 for non-FF
1333 			 * 2. if non-FF, then swap result from BE to host order
1334 			 * 3. check byte 7 (swapped to 0) for non-FF
1335 			 * 4. if non-FF, use the low 16-bit status code
1336 			 * 5. if either byte 0 or byte 7 is FF, don't use status
1337 			 */
1338 			if ((status64 & 0xff) != 0xff) {
1339 				lio_swap_8B_data(&status64, 1);
1340 				if (((status64 & 0xff) != 0xff)) {
1341 					/* retrieve 16-bit firmware status */
1342 					status = (uint32_t)(status64 &
1343 							    0xffffULL);
1344 					if (status) {
1345 						status =
1346 						LIO_FIRMWARE_STATUS_CODE(
1347 									status);
1348 					} else {
1349 						/* i.e. no error */
1350 						status = LIO_REQUEST_DONE;
1351 					}
1352 				}
1353 			}
1354 		} else if ((sc->timeout && lio_check_timeout(lio_uptime,
1355 							     sc->timeout))) {
1356 			lio_dev_err(lio_dev,
1357 				    "cmd failed, timeout (%ld, %ld)\n",
1358 				    (long)lio_uptime, (long)sc->timeout);
1359 			status = LIO_REQUEST_TIMEOUT;
1360 		}
1361 
1362 		if (status != LIO_REQUEST_PENDING) {
1363 			/* we have received a response or we have timed out.
1364 			 * remove node from linked list
1365 			 */
1366 			STAILQ_REMOVE(&ordered_sc_list->head,
1367 				      &sc->node, lio_stailq_node, entries);
1368 			rte_atomic64_dec(
1369 			    &lio_dev->response_list.pending_req_count);
1370 			rte_spinlock_unlock(&ordered_sc_list->lock);
1371 
1372 			if (sc->callback)
1373 				sc->callback(status, sc->callback_arg);
1374 
1375 			request_complete++;
1376 		} else {
1377 			/* no response yet */
1378 			request_complete = 0;
1379 			rte_spinlock_unlock(&ordered_sc_list->lock);
1380 		}
1381 
1382 		/* If we hit the Max Ordered requests to process every loop,
1383 		 * we quit and let this function be invoked the next time
1384 		 * the poll thread runs to process the remaining requests.
1385 		 * This function can take up the entire CPU if there is
1386 		 * no upper limit to the requests processed.
1387 		 */
1388 		if (request_complete >= resp_to_process)
1389 			break;
1390 	} while (request_complete);
1391 
1392 	return 0;
1393 }
1394 
1395 static inline struct lio_stailq_node *
1396 list_delete_first_node(struct lio_stailq_head *head)
1397 {
1398 	struct lio_stailq_node *node;
1399 
1400 	if (STAILQ_EMPTY(head))
1401 		node = NULL;
1402 	else
1403 		node = STAILQ_FIRST(head);
1404 
1405 	if (node)
1406 		STAILQ_REMOVE(head, node, lio_stailq_node, entries);
1407 
1408 	return node;
1409 }
1410 
1411 void
1412 lio_delete_sglist(struct lio_instr_queue *txq)
1413 {
1414 	struct lio_device *lio_dev = txq->lio_dev;
1415 	int iq_no = txq->q_index;
1416 	struct lio_gather *g;
1417 
1418 	if (lio_dev->glist_head == NULL)
1419 		return;
1420 
1421 	do {
1422 		g = (struct lio_gather *)list_delete_first_node(
1423 						&lio_dev->glist_head[iq_no]);
1424 		if (g) {
1425 			if (g->sg)
1426 				rte_free(
1427 				    (void *)((unsigned long)g->sg - g->adjust));
1428 			rte_free(g);
1429 		}
1430 	} while (g);
1431 }
1432 
1433 /**
1434  * \brief Setup gather lists
1435  * @param lio per-network private data
1436  */
1437 int
1438 lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
1439 		  int fw_mapped_iq, int num_descs, unsigned int socket_id)
1440 {
1441 	struct lio_gather *g;
1442 	int i;
1443 
1444 	rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
1445 
1446 	STAILQ_INIT(&lio_dev->glist_head[iq_no]);
1447 
1448 	for (i = 0; i < num_descs; i++) {
1449 		g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
1450 				       socket_id);
1451 		if (g == NULL) {
1452 			lio_dev_err(lio_dev,
1453 				    "lio_gather memory allocation failed for qno %d\n",
1454 				    iq_no);
1455 			break;
1456 		}
1457 
1458 		g->sg_size =
1459 		    ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
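		/*
		 * With LIO_MAX_SG = 12 this leaves room for 3 scatter-gather
		 * entries; each entry is assumed to pack four buffer pointers
		 * plus their sizes, matching the (i >> 2) / (i & 3) indexing
		 * in lio_dev_xmit_pkts().  The extra 8 bytes allocated below
		 * give room for the manual 8-byte alignment fix-up.
		 */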
1460 
1461 		g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
1462 					   RTE_CACHE_LINE_SIZE, socket_id);
1463 		if (g->sg == NULL) {
1464 			lio_dev_err(lio_dev,
1465 				    "sg list memory allocation failed for qno %d\n",
1466 				    iq_no);
1467 			rte_free(g);
1468 			break;
1469 		}
1470 
1471 		/* The gather component should be aligned on 64-bit boundary */
1472 		if (((unsigned long)g->sg) & 7) {
1473 			g->adjust = 8 - (((unsigned long)g->sg) & 7);
1474 			g->sg =
1475 			    (struct lio_sg_entry *)((unsigned long)g->sg +
1476 						       g->adjust);
1477 		}
1478 
1479 		STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
1480 				   entries);
1481 	}
1482 
1483 	if (i != num_descs) {
1484 		lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
1485 		return -ENOMEM;
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 void
1492 lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
1493 {
1494 	lio_delete_instr_queue(lio_dev, iq_no);
1495 	rte_free(lio_dev->instr_queue[iq_no]);
1496 	lio_dev->instr_queue[iq_no] = NULL;
1497 	lio_dev->num_iqs--;
1498 }
1499 
1500 static inline uint32_t
1501 lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
1502 {
1503 	return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
1504 		(uint32_t)rte_atomic64_read(
1505 				&lio_dev->instr_queue[q_no]->instr_pending));
1506 }
1507 
1508 static inline int
1509 lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
1510 {
1511 	return ((uint32_t)rte_atomic64_read(
1512 				&lio_dev->instr_queue[q_no]->instr_pending) >=
1513 				(lio_dev->instr_queue[q_no]->nb_desc - 2));
1514 }
1515 
1516 static int
1517 lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
1518 {
1519 	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
1520 	uint32_t count = 10000;
1521 
1522 	while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
1523 			--count)
1524 		lio_flush_iq(lio_dev, iq);
1525 
1526 	return count ? 0 : 1;
1527 }
1528 
1529 static void
1530 lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
1531 {
1532 	struct lio_soft_command *sc = sc_ptr;
1533 	struct lio_dev_ctrl_cmd *ctrl_cmd;
1534 	struct lio_ctrl_pkt *ctrl_pkt;
1535 
1536 	ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
1537 	ctrl_cmd = ctrl_pkt->ctrl_cmd;
1538 	ctrl_cmd->cond = 1;
1539 
1540 	lio_free_soft_command(sc);
1541 }
1542 
1543 static inline struct lio_soft_command *
1544 lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
1545 		      struct lio_ctrl_pkt *ctrl_pkt)
1546 {
1547 	struct lio_soft_command *sc = NULL;
1548 	uint32_t uddsize, datasize;
1549 	uint32_t rdatasize;
1550 	uint8_t *data;
1551 
1552 	uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
1553 
1554 	datasize = OCTEON_CMD_SIZE + uddsize;
1555 	rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
1556 
1557 	sc = lio_alloc_soft_command(lio_dev, datasize,
1558 				    rdatasize, sizeof(struct lio_ctrl_pkt));
1559 	if (sc == NULL)
1560 		return NULL;
1561 
1562 	rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
1563 
1564 	data = (uint8_t *)sc->virtdptr;
1565 
1566 	rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
1567 
1568 	lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
1569 
1570 	if (uddsize) {
1571 		/* Endian-Swap for UDD should have been done by caller. */
1572 		rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
1573 	}
1574 
1575 	sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
1576 
1577 	lio_prepare_soft_command(lio_dev, sc,
1578 				 LIO_OPCODE, LIO_OPCODE_CMD,
1579 				 0, 0, 0);
1580 
1581 	sc->callback = lio_ctrl_cmd_callback;
1582 	sc->callback_arg = sc;
1583 	sc->wait_time = ctrl_pkt->wait_time;
1584 
1585 	return sc;
1586 }
1587 
1588 int
1589 lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
1590 {
1591 	struct lio_soft_command *sc = NULL;
1592 	int retval;
1593 
1594 	sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
1595 	if (sc == NULL) {
1596 		lio_dev_err(lio_dev, "soft command allocation failed\n");
1597 		return -1;
1598 	}
1599 
1600 	retval = lio_send_soft_command(lio_dev, sc);
1601 	if (retval == LIO_IQ_SEND_FAILED) {
1602 		lio_free_soft_command(sc);
1603 		lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
1604 			    lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
1605 		return -1;
1606 	}
1607 
1608 	return retval;
1609 }
1610 
1611 /** Send data packet to the device
1612  *  @param lio_dev - lio device pointer
1613  *  @param ndata   - control structure with queueing and buffer information
1614  *
1615  *  @returns LIO_IQ_SEND_FAILED if it failed to add to the input queue,
1616  *  LIO_IQ_SEND_STOP if the queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
1617  */
1618 static inline int
1619 lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
1620 {
1621 	return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
1622 				ndata->buf, ndata->datasize, ndata->reqtype);
1623 }
1624 
1625 uint16_t
1626 lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
1627 {
1628 	struct lio_instr_queue *txq = tx_queue;
1629 	union lio_cmd_setup cmdsetup;
1630 	struct lio_device *lio_dev;
1631 	struct lio_iq_stats *stats;
1632 	struct lio_data_pkt ndata;
1633 	int i, processed = 0;
1634 	struct rte_mbuf *m;
1635 	uint32_t tag = 0;
1636 	int status = 0;
1637 	int iq_no;
1638 
1639 	lio_dev = txq->lio_dev;
1640 	iq_no = txq->txpciq.s.q_no;
1641 	stats = &lio_dev->instr_queue[iq_no]->stats;
1642 
1643 	if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
1644 		PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
1645 			   lio_dev->linfo.link.s.link_up);
1646 		goto xmit_failed;
1647 	}
1648 
1649 	lio_dev_cleanup_iq(lio_dev, iq_no);
1650 
1651 	for (i = 0; i < nb_pkts; i++) {
1652 		uint32_t pkt_len = 0;
1653 
1654 		m = pkts[i];
1655 
1656 		/* Prepare the attributes for the data to be passed to BASE. */
1657 		memset(&ndata, 0, sizeof(struct lio_data_pkt));
1658 
1659 		ndata.buf = m;
1660 
1661 		ndata.q_no = iq_no;
1662 		if (lio_iq_is_full(lio_dev, ndata.q_no)) {
1663 			stats->tx_iq_busy++;
1664 			if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
1665 				PMD_TX_LOG(lio_dev, ERR,
1666 					   "Transmit failed iq:%d full\n",
1667 					   ndata.q_no);
1668 				break;
1669 			}
1670 		}
1671 
1672 		cmdsetup.cmd_setup64 = 0;
1673 		cmdsetup.s.iq_no = iq_no;
1674 
1675 		/* check checksum offload flags to form cmd */
1676 		if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
1677 			cmdsetup.s.ip_csum = 1;
1678 
1679 		if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
1680 			cmdsetup.s.tnl_csum = 1;
1681 		else if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ||
1682 				(m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM))
1683 			cmdsetup.s.transport_csum = 1;
1684 
1685 		if (m->nb_segs == 1) {
1686 			pkt_len = rte_pktmbuf_data_len(m);
1687 			cmdsetup.s.u.datasize = pkt_len;
1688 			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
1689 					    &cmdsetup, tag);
1690 			ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
1691 			ndata.reqtype = LIO_REQTYPE_NORESP_NET;
1692 		} else {
1693 			struct lio_buf_free_info *finfo;
1694 			struct lio_gather *g;
1695 			rte_iova_t phyaddr;
1696 			int i, frags;
1697 
1698 			finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
1699 							sizeof(*finfo), 0);
1700 			if (finfo == NULL) {
1701 				PMD_TX_LOG(lio_dev, ERR,
1702 					   "free buffer alloc failed\n");
1703 				goto xmit_failed;
1704 			}
1705 
1706 			rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
1707 			g = (struct lio_gather *)list_delete_first_node(
1708 						&lio_dev->glist_head[iq_no]);
1709 			rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
1710 			if (g == NULL) {
1711 				PMD_TX_LOG(lio_dev, ERR,
1712 					   "Transmit scatter gather: glist null!\n");
1713 				goto xmit_failed;
1714 			}
1715 
1716 			cmdsetup.s.gather = 1;
1717 			cmdsetup.s.u.gatherptrs = m->nb_segs;
1718 			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
1719 					    &cmdsetup, tag);
1720 
1721 			memset(g->sg, 0, g->sg_size);
1722 			g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
1723 			lio_add_sg_size(&g->sg[0], m->data_len, 0);
1724 			pkt_len = m->data_len;
1725 			finfo->mbuf = m;
1726 
1727 			/* First seg taken care above */
1728 			frags = m->nb_segs - 1;
1729 			i = 1;
1730 			m = m->next;
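			/*
			 * i indexes the flat list of SG pointers for the
			 * remaining segments: (i >> 2) selects the SG entry
			 * and (i & 3) the pointer slot inside it, i.e. four
			 * pointers per entry as assumed above.
			 */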
1731 			while (frags--) {
1732 				g->sg[(i >> 2)].ptr[(i & 3)] =
1733 						rte_mbuf_data_iova(m);
1734 				lio_add_sg_size(&g->sg[(i >> 2)],
1735 						m->data_len, (i & 3));
1736 				pkt_len += m->data_len;
1737 				i++;
1738 				m = m->next;
1739 			}
1740 
1741 			phyaddr = rte_mem_virt2iova(g->sg);
1742 			if (phyaddr == RTE_BAD_IOVA) {
1743 				PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
1744 				goto xmit_failed;
1745 			}
1746 
1747 			ndata.cmd.cmd3.dptr = phyaddr;
1748 			ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
1749 
1750 			finfo->g = g;
1751 			finfo->lio_dev = lio_dev;
1752 			finfo->iq_no = (uint64_t)iq_no;
1753 			ndata.buf = finfo;
1754 		}
1755 
1756 		ndata.datasize = pkt_len;
1757 
1758 		status = lio_send_data_pkt(lio_dev, &ndata);
1759 
1760 		if (unlikely(status == LIO_IQ_SEND_FAILED)) {
1761 			PMD_TX_LOG(lio_dev, ERR, "send failed\n");
1762 			break;
1763 		}
1764 
1765 		if (unlikely(status == LIO_IQ_SEND_STOP)) {
1766 			PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
1767 			/* create space as iq is full */
1768 			lio_dev_cleanup_iq(lio_dev, iq_no);
1769 		}
1770 
1771 		stats->tx_done++;
1772 		stats->tx_tot_bytes += pkt_len;
1773 		processed++;
1774 	}
1775 
1776 xmit_failed:
1777 	stats->tx_dropped += (nb_pkts - processed);
1778 
1779 	return processed;
1780 }
1781 
1782 void
1783 lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
1784 {
1785 	struct lio_instr_queue *txq;
1786 	struct lio_droq *rxq;
1787 	uint16_t i;
1788 
1789 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1790 		txq = eth_dev->data->tx_queues[i];
1791 		if (txq != NULL) {
1792 			lio_dev_tx_queue_release(eth_dev, i);
1793 			eth_dev->data->tx_queues[i] = NULL;
1794 		}
1795 	}
1796 
1797 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1798 		rxq = eth_dev->data->rx_queues[i];
1799 		if (rxq != NULL) {
1800 			lio_dev_rx_queue_release(eth_dev, i);
1801 			eth_dev->data->rx_queues[i] = NULL;
1802 		}
1803 	}
1804 }
1805