xref: /f-stack/dpdk/drivers/net/ark/ark_ethdev_rx.c (revision 2d9fd380)
1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright (c) 2015-2018 Atomic Rules LLC
32bfe3f2eSlogwang  */
42bfe3f2eSlogwang 
52bfe3f2eSlogwang #include <unistd.h>
62bfe3f2eSlogwang 
7*2d9fd380Sjfb8856606 #include "rte_pmd_ark.h"
82bfe3f2eSlogwang #include "ark_ethdev_rx.h"
92bfe3f2eSlogwang #include "ark_global.h"
102bfe3f2eSlogwang #include "ark_logs.h"
112bfe3f2eSlogwang #include "ark_mpu.h"
122bfe3f2eSlogwang #include "ark_udm.h"
132bfe3f2eSlogwang 
142bfe3f2eSlogwang #define ARK_RX_META_SIZE 32
152bfe3f2eSlogwang #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
162bfe3f2eSlogwang #define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
172bfe3f2eSlogwang 
182bfe3f2eSlogwang /* Forward declarations */
192bfe3f2eSlogwang struct ark_rx_queue;
202bfe3f2eSlogwang struct ark_rx_meta;
212bfe3f2eSlogwang 
222bfe3f2eSlogwang static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
232bfe3f2eSlogwang static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
242bfe3f2eSlogwang static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
252bfe3f2eSlogwang 				 struct ark_rx_meta *meta,
262bfe3f2eSlogwang 				 struct rte_mbuf *mbuf0,
272bfe3f2eSlogwang 				 uint32_t cons_index);
282bfe3f2eSlogwang static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
29d30ea906Sjfb8856606 static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
30d30ea906Sjfb8856606 				    uint32_t *pnb,
31d30ea906Sjfb8856606 				    struct rte_mbuf **mbufs);
322bfe3f2eSlogwang 
332bfe3f2eSlogwang /* ************************************************************************* */
/* Software state for one RX queue.  The ring of reserved mbufs is
 * filled by the driver (seed), consumed by HW (prod), then harvested
 * by the driver (cons); all three indices are free-running uint32_t
 * values wrapped with queue_mask.
 */
struct ark_rx_queue {
	/* array of mbufs to populate */
	struct rte_mbuf **reserve_q;
	/* array of physical addresses of the mbuf data pointer */
	/* This point is a virtual address */
	rte_iova_t *paddress_q;
	struct rte_mempool *mb_pool;

	struct ark_udm_t *udm;	/* per-queue UDM HW register block */
	struct ark_mpu_t *mpu;	/* per-queue MPU HW register block */

	uint32_t queue_size;	/* ring entries; power of 2 (checked at setup) */
	uint32_t queue_mask;	/* queue_size - 1, used to wrap indices */

	uint32_t seed_index;		/* step 1 set with empty mbuf */
	uint32_t cons_index;		/* step 3 consumed by driver */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;

	/* The queue Index is used within the dpdk device structures */
	uint16_t queue_index;

	/* cons_index at the last HW producer write; batches MPU updates */
	uint32_t last_cons;

	/* separate cache line */
	/* second cache line - fields only used in slow path */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;

	volatile uint32_t prod_index;	/* step 2 filled by FPGA */
} __rte_cache_aligned;
652bfe3f2eSlogwang 
662bfe3f2eSlogwang 
672bfe3f2eSlogwang /* ************************************************************************* */
/* Program the MPU/UDM hardware for one RX queue.
 * Hands HW the IOVA of the mbuf-address ring (paddress_q) and the
 * IOVA of this queue's prod_index field, which the FPGA writes
 * directly.  Leaves the queue configured but in the STOPPED state.
 * Returns 0 on success, -1 if the MPU sanity check fails.
 */
static int
eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
		    struct ark_rx_queue *queue,
		    uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
{
	rte_iova_t queue_base;
	rte_iova_t phys_addr_q_base;
	rte_iova_t phys_addr_prod_index;

	queue_base = rte_malloc_virt2iova(queue);
	/* IOVA of queue->prod_index: destination for the FPGA's DMA */
	phys_addr_prod_index = queue_base +
		offsetof(struct ark_rx_queue, prod_index);

	/* IOVA of the array of mbuf buffer addresses given to HW */
	phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);

	/* Verify HW */
	if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
		ARK_PMD_LOG(ERR, "Illegal configuration rx queue\n");
		return -1;
	}

	/* Stop and Reset and configure MPU */
	ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);

	ark_udm_write_addr(queue->udm, phys_addr_prod_index);

	/* advance the valid pointer, but don't start until the queue starts */
	ark_mpu_reset_stats(queue->mpu);

	/* The seed is the producer index for the HW */
	ark_mpu_set_producer(queue->mpu, queue->seed_index);
	dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
1032bfe3f2eSlogwang 
1042bfe3f2eSlogwang static inline void
eth_ark_rx_update_cons_index(struct ark_rx_queue * queue,uint32_t cons_index)1052bfe3f2eSlogwang eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
1062bfe3f2eSlogwang {
1072bfe3f2eSlogwang 	queue->cons_index = cons_index;
1082bfe3f2eSlogwang 	eth_ark_rx_seed_mbufs(queue);
109d30ea906Sjfb8856606 	if (((cons_index - queue->last_cons) >= 64U)) {
110d30ea906Sjfb8856606 		queue->last_cons = cons_index;
1112bfe3f2eSlogwang 		ark_mpu_set_producer(queue->mpu, queue->seed_index);
1122bfe3f2eSlogwang 	}
113d30ea906Sjfb8856606 }
1142bfe3f2eSlogwang 
1152bfe3f2eSlogwang /* ************************************************************************* */
1162bfe3f2eSlogwang int
eth_ark_dev_rx_queue_setup(struct rte_eth_dev * dev,uint16_t queue_idx,uint16_t nb_desc,unsigned int socket_id,const struct rte_eth_rxconf * rx_conf,struct rte_mempool * mb_pool)1172bfe3f2eSlogwang eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
1182bfe3f2eSlogwang 			   uint16_t queue_idx,
1192bfe3f2eSlogwang 			   uint16_t nb_desc,
1202bfe3f2eSlogwang 			   unsigned int socket_id,
1212bfe3f2eSlogwang 			   const struct rte_eth_rxconf *rx_conf,
1222bfe3f2eSlogwang 			   struct rte_mempool *mb_pool)
1232bfe3f2eSlogwang {
1242bfe3f2eSlogwang 	static int warning1;		/* = 0 */
1254b05018fSfengbojiang 	struct ark_adapter *ark = dev->data->dev_private;
1262bfe3f2eSlogwang 
1272bfe3f2eSlogwang 	struct ark_rx_queue *queue;
1282bfe3f2eSlogwang 	uint32_t i;
1292bfe3f2eSlogwang 	int status;
1302bfe3f2eSlogwang 
1314b05018fSfengbojiang 	int qidx = queue_idx;
1322bfe3f2eSlogwang 
1332bfe3f2eSlogwang 	/* We may already be setup, free memory prior to re-allocation */
1342bfe3f2eSlogwang 	if (dev->data->rx_queues[queue_idx] != NULL) {
1352bfe3f2eSlogwang 		eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
1362bfe3f2eSlogwang 		dev->data->rx_queues[queue_idx] = NULL;
1372bfe3f2eSlogwang 	}
1382bfe3f2eSlogwang 
1392bfe3f2eSlogwang 	if (rx_conf != NULL && warning1 == 0) {
1402bfe3f2eSlogwang 		warning1 = 1;
141*2d9fd380Sjfb8856606 		ARK_PMD_LOG(NOTICE,
1422bfe3f2eSlogwang 			    "Arkville ignores rte_eth_rxconf argument.\n");
1432bfe3f2eSlogwang 	}
1442bfe3f2eSlogwang 
1452bfe3f2eSlogwang 	if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
146*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR,
1472bfe3f2eSlogwang 			    "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
1482bfe3f2eSlogwang 			    ARK_RX_META_SIZE, __func__);
1492bfe3f2eSlogwang 		return -1;		/* ERROR CODE */
1502bfe3f2eSlogwang 	}
1512bfe3f2eSlogwang 
1522bfe3f2eSlogwang 	if (!rte_is_power_of_2(nb_desc)) {
153*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR,
1542bfe3f2eSlogwang 			    "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
1552bfe3f2eSlogwang 			    nb_desc, __func__);
1562bfe3f2eSlogwang 		return -1;		/* ERROR CODE */
1572bfe3f2eSlogwang 	}
1582bfe3f2eSlogwang 
1592bfe3f2eSlogwang 	/* Allocate queue struct */
1602bfe3f2eSlogwang 	queue = rte_zmalloc_socket("Ark_rxqueue",
1612bfe3f2eSlogwang 				   sizeof(struct ark_rx_queue),
1622bfe3f2eSlogwang 				   64,
1632bfe3f2eSlogwang 				   socket_id);
1642bfe3f2eSlogwang 	if (queue == 0) {
165*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
1662bfe3f2eSlogwang 		return -ENOMEM;
1672bfe3f2eSlogwang 	}
1682bfe3f2eSlogwang 
1692bfe3f2eSlogwang 	/* NOTE zmalloc is used, no need to 0 indexes, etc. */
1702bfe3f2eSlogwang 	queue->mb_pool = mb_pool;
1712bfe3f2eSlogwang 	queue->phys_qid = qidx;
1722bfe3f2eSlogwang 	queue->queue_index = queue_idx;
1732bfe3f2eSlogwang 	queue->queue_size = nb_desc;
1742bfe3f2eSlogwang 	queue->queue_mask = nb_desc - 1;
1752bfe3f2eSlogwang 
1762bfe3f2eSlogwang 	queue->reserve_q =
1772bfe3f2eSlogwang 		rte_zmalloc_socket("Ark_rx_queue mbuf",
1782bfe3f2eSlogwang 				   nb_desc * sizeof(struct rte_mbuf *),
1792bfe3f2eSlogwang 				   64,
1802bfe3f2eSlogwang 				   socket_id);
1812bfe3f2eSlogwang 	queue->paddress_q =
1822bfe3f2eSlogwang 		rte_zmalloc_socket("Ark_rx_queue paddr",
1832bfe3f2eSlogwang 				   nb_desc * sizeof(rte_iova_t),
1842bfe3f2eSlogwang 				   64,
1852bfe3f2eSlogwang 				   socket_id);
1862bfe3f2eSlogwang 
1872bfe3f2eSlogwang 	if (queue->reserve_q == 0 || queue->paddress_q == 0) {
188*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR,
1892bfe3f2eSlogwang 			    "Failed to allocate queue memory in %s\n",
1902bfe3f2eSlogwang 			    __func__);
1912bfe3f2eSlogwang 		rte_free(queue->reserve_q);
1922bfe3f2eSlogwang 		rte_free(queue->paddress_q);
1932bfe3f2eSlogwang 		rte_free(queue);
1942bfe3f2eSlogwang 		return -ENOMEM;
1952bfe3f2eSlogwang 	}
1962bfe3f2eSlogwang 
1972bfe3f2eSlogwang 	dev->data->rx_queues[queue_idx] = queue;
1982bfe3f2eSlogwang 	queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
1992bfe3f2eSlogwang 	queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
2002bfe3f2eSlogwang 
2012bfe3f2eSlogwang 	/* populate mbuf reserve */
2022bfe3f2eSlogwang 	status = eth_ark_rx_seed_mbufs(queue);
2032bfe3f2eSlogwang 
204d30ea906Sjfb8856606 	if (queue->seed_index != nb_desc) {
205*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR, "Failed to allocate %u mbufs for RX queue %d\n",
206d30ea906Sjfb8856606 			    nb_desc, qidx);
207d30ea906Sjfb8856606 		status = -1;
208d30ea906Sjfb8856606 	}
2092bfe3f2eSlogwang 	/* MPU Setup */
2102bfe3f2eSlogwang 	if (status == 0)
2112bfe3f2eSlogwang 		status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
2122bfe3f2eSlogwang 
2132bfe3f2eSlogwang 	if (unlikely(status != 0)) {
214d30ea906Sjfb8856606 		struct rte_mbuf **mbuf;
2152bfe3f2eSlogwang 
216*2d9fd380Sjfb8856606 		ARK_PMD_LOG(ERR, "Failed to initialize RX queue %d %s\n",
2172bfe3f2eSlogwang 			    qidx,
2182bfe3f2eSlogwang 			    __func__);
2192bfe3f2eSlogwang 		/* Free the mbufs allocated */
220d30ea906Sjfb8856606 		for (i = 0, mbuf = queue->reserve_q;
221d30ea906Sjfb8856606 		     i < queue->seed_index; ++i, mbuf++) {
222d30ea906Sjfb8856606 			rte_pktmbuf_free(*mbuf);
2232bfe3f2eSlogwang 		}
2242bfe3f2eSlogwang 		rte_free(queue->reserve_q);
2252bfe3f2eSlogwang 		rte_free(queue->paddress_q);
2262bfe3f2eSlogwang 		rte_free(queue);
2272bfe3f2eSlogwang 		return -1;		/* ERROR CODE */
2282bfe3f2eSlogwang 	}
2292bfe3f2eSlogwang 
2302bfe3f2eSlogwang 	return 0;
2312bfe3f2eSlogwang }
2322bfe3f2eSlogwang 
2332bfe3f2eSlogwang /* ************************************************************************* */
/* No-op RX burst handler: always returns 0 packets and touches
 * nothing.  Presumably installed as the burst function while RX is
 * disabled -- confirm against the handler selection in ark_ethdev.c.
 */
uint16_t
eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
		       struct rte_mbuf **rx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}
2412bfe3f2eSlogwang 
2422bfe3f2eSlogwang /* ************************************************************************* */
/* Burst RX: walk the ring from cons_index toward the FPGA-written
 * prod_index, handing completed mbufs to the caller.  Per-packet
 * metadata (port, length, timestamp, user data) is read from the ARK
 * meta block the HW placed in the mbuf headroom at ARK_RX_META_OFFSET.
 * Packets longer than one mbuf's data room are chained via
 * eth_ark_rx_jumbo().  Returns the number of mbufs stored in rx_pkts.
 */
uint16_t
eth_ark_recv_pkts(void *rx_queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct ark_rx_queue *queue;
	register uint32_t cons_index, prod_index;
	uint16_t nb;
	struct rte_mbuf *mbuf;
	struct ark_rx_meta *meta;

	queue = (struct ark_rx_queue *)rx_queue;
	if (unlikely(queue == 0))
		return 0;
	if (unlikely(nb_pkts == 0))
		return 0;
	/* Snapshot the volatile HW producer index once per burst */
	prod_index = queue->prod_index;
	cons_index = queue->cons_index;
	nb = 0;

	while (prod_index != cons_index) {
		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
		/* prefetch mbuf */
		rte_mbuf_prefetch_part1(mbuf);
		rte_mbuf_prefetch_part2(mbuf);

		/* META DATA embedded in headroom */
		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);

		mbuf->port = meta->port;
		mbuf->pkt_len = meta->pkt_len;
		mbuf->data_len = meta->pkt_len;
		/* set timestamp if enabled at least on one device */
		if (ark_timestamp_rx_dynflag > 0) {
			*RTE_MBUF_DYNFIELD(mbuf, ark_timestamp_dynfield_offset,
				rte_mbuf_timestamp_t *) = meta->timestamp;
			mbuf->ol_flags |= ark_timestamp_rx_dynflag;
		}
		rte_pmd_ark_mbuf_rx_userdata_set(mbuf, meta->user_data);

		if (ARK_DEBUG_CORE) {	/* debug sanity checks */
			/* length 0 or > 16K means a corrupt meta block */
			if ((meta->pkt_len > (1024 * 16)) ||
			    (meta->pkt_len == 0)) {
				ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
					   " cons: %" PRIU32
					   " prod: %" PRIU32
					   " seed_index %" PRIU32
					   "\n",
					   queue->phys_qid,
					   cons_index,
					   queue->prod_index,
					   queue->seed_index);


				ARK_PMD_LOG(DEBUG, "       :  UDM"
					   " prod: %" PRIU32
					   " len: %u\n",
					   queue->udm->rt_cfg.prod_idx,
					   meta->pkt_len);
				ark_mpu_dump(queue->mpu,
					     "    ",
					     queue->phys_qid);
				dump_mbuf_data(mbuf, 0, 256);
				/* its FUBAR so fix it */
				mbuf->pkt_len = 63;
				meta->pkt_len = 63;
			}
		}

		/* Multi-segment packet consumes extra ring slots */
		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
			cons_index = eth_ark_rx_jumbo
				(queue, meta, mbuf, cons_index + 1);
		else
			cons_index += 1;

		rx_pkts[nb] = mbuf;
		nb++;
		if (nb >= nb_pkts)
			break;
	}

	if (unlikely(nb != 0))
		/* report next free to FPGA */
		eth_ark_rx_update_cons_index(queue, cons_index);

	return nb;
}
3302bfe3f2eSlogwang 
3312bfe3f2eSlogwang /* ************************************************************************* */
3322bfe3f2eSlogwang static uint32_t
eth_ark_rx_jumbo(struct ark_rx_queue * queue,struct ark_rx_meta * meta,struct rte_mbuf * mbuf0,uint32_t cons_index)3332bfe3f2eSlogwang eth_ark_rx_jumbo(struct ark_rx_queue *queue,
3342bfe3f2eSlogwang 		 struct ark_rx_meta *meta,
3352bfe3f2eSlogwang 		 struct rte_mbuf *mbuf0,
3362bfe3f2eSlogwang 		 uint32_t cons_index)
3372bfe3f2eSlogwang {
3382bfe3f2eSlogwang 	struct rte_mbuf *mbuf_prev;
3392bfe3f2eSlogwang 	struct rte_mbuf *mbuf;
3402bfe3f2eSlogwang 
3412bfe3f2eSlogwang 	uint16_t remaining;
3422bfe3f2eSlogwang 	uint16_t data_len;
3432bfe3f2eSlogwang 	uint16_t segments;
3442bfe3f2eSlogwang 
3452bfe3f2eSlogwang 	/* first buf populated by called */
3462bfe3f2eSlogwang 	mbuf_prev = mbuf0;
3472bfe3f2eSlogwang 	segments = 1;
3482bfe3f2eSlogwang 	data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
3492bfe3f2eSlogwang 	remaining = meta->pkt_len - data_len;
3502bfe3f2eSlogwang 	mbuf0->data_len = data_len;
3512bfe3f2eSlogwang 
3522bfe3f2eSlogwang 	/* HW guarantees that the data does not exceed prod_index! */
3532bfe3f2eSlogwang 	while (remaining != 0) {
3542bfe3f2eSlogwang 		data_len = RTE_MIN(remaining,
3552bfe3f2eSlogwang 				   RTE_MBUF_DEFAULT_DATAROOM +
3562bfe3f2eSlogwang 				   RTE_PKTMBUF_HEADROOM);
3572bfe3f2eSlogwang 
3582bfe3f2eSlogwang 		remaining -= data_len;
3592bfe3f2eSlogwang 		segments += 1;
3602bfe3f2eSlogwang 
3612bfe3f2eSlogwang 		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
3622bfe3f2eSlogwang 		mbuf_prev->next = mbuf;
3632bfe3f2eSlogwang 		mbuf_prev = mbuf;
3642bfe3f2eSlogwang 		mbuf->data_len = data_len;
3652bfe3f2eSlogwang 		mbuf->data_off = 0;
3662bfe3f2eSlogwang 
3672bfe3f2eSlogwang 		cons_index += 1;
3682bfe3f2eSlogwang 	}
3692bfe3f2eSlogwang 
3702bfe3f2eSlogwang 	mbuf0->nb_segs = segments;
3712bfe3f2eSlogwang 	return cons_index;
3722bfe3f2eSlogwang }
3732bfe3f2eSlogwang 
3742bfe3f2eSlogwang /* Drain the internal queue allowing hw to clear out. */
3752bfe3f2eSlogwang static void
eth_ark_rx_queue_drain(struct ark_rx_queue * queue)3762bfe3f2eSlogwang eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
3772bfe3f2eSlogwang {
3782bfe3f2eSlogwang 	register uint32_t cons_index;
3792bfe3f2eSlogwang 	struct rte_mbuf *mbuf;
3802bfe3f2eSlogwang 
3812bfe3f2eSlogwang 	cons_index = queue->cons_index;
3822bfe3f2eSlogwang 
3832bfe3f2eSlogwang 	/* NOT performance optimized, since this is a one-shot call */
3842bfe3f2eSlogwang 	while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
3852bfe3f2eSlogwang 		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
3862bfe3f2eSlogwang 		rte_pktmbuf_free(mbuf);
3872bfe3f2eSlogwang 		cons_index++;
3882bfe3f2eSlogwang 		eth_ark_rx_update_cons_index(queue, cons_index);
3892bfe3f2eSlogwang 	}
3902bfe3f2eSlogwang }
3912bfe3f2eSlogwang 
3922bfe3f2eSlogwang uint32_t
eth_ark_dev_rx_queue_count(struct rte_eth_dev * dev,uint16_t queue_id)3932bfe3f2eSlogwang eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
3942bfe3f2eSlogwang {
3952bfe3f2eSlogwang 	struct ark_rx_queue *queue;
3962bfe3f2eSlogwang 
3972bfe3f2eSlogwang 	queue = dev->data->rx_queues[queue_id];
3982bfe3f2eSlogwang 	return (queue->prod_index - queue->cons_index);	/* mod arith */
3992bfe3f2eSlogwang }
4002bfe3f2eSlogwang 
4012bfe3f2eSlogwang /* ************************************************************************* */
4022bfe3f2eSlogwang int
eth_ark_rx_start_queue(struct rte_eth_dev * dev,uint16_t queue_id)4032bfe3f2eSlogwang eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
4042bfe3f2eSlogwang {
4052bfe3f2eSlogwang 	struct ark_rx_queue *queue;
4062bfe3f2eSlogwang 
4072bfe3f2eSlogwang 	queue = dev->data->rx_queues[queue_id];
4082bfe3f2eSlogwang 	if (queue == 0)
4092bfe3f2eSlogwang 		return -1;
4102bfe3f2eSlogwang 
4112bfe3f2eSlogwang 	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
4122bfe3f2eSlogwang 
4132bfe3f2eSlogwang 	ark_mpu_set_producer(queue->mpu, queue->seed_index);
4142bfe3f2eSlogwang 	ark_mpu_start(queue->mpu);
4152bfe3f2eSlogwang 
4162bfe3f2eSlogwang 	ark_udm_queue_enable(queue->udm, 1);
4172bfe3f2eSlogwang 
4182bfe3f2eSlogwang 	return 0;
4192bfe3f2eSlogwang }
4202bfe3f2eSlogwang 
4212bfe3f2eSlogwang /* ************************************************************************* */
4222bfe3f2eSlogwang 
4232bfe3f2eSlogwang /* Queue can be restarted.   data remains
4242bfe3f2eSlogwang  */
4252bfe3f2eSlogwang int
eth_ark_rx_stop_queue(struct rte_eth_dev * dev,uint16_t queue_id)4262bfe3f2eSlogwang eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
4272bfe3f2eSlogwang {
4282bfe3f2eSlogwang 	struct ark_rx_queue *queue;
4292bfe3f2eSlogwang 
4302bfe3f2eSlogwang 	queue = dev->data->rx_queues[queue_id];
4312bfe3f2eSlogwang 	if (queue == 0)
4322bfe3f2eSlogwang 		return -1;
4332bfe3f2eSlogwang 
4342bfe3f2eSlogwang 	ark_udm_queue_enable(queue->udm, 0);
4352bfe3f2eSlogwang 
4362bfe3f2eSlogwang 	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
4372bfe3f2eSlogwang 
4382bfe3f2eSlogwang 	return 0;
4392bfe3f2eSlogwang }
4402bfe3f2eSlogwang 
4412bfe3f2eSlogwang /* ************************************************************************* */
/* Refill the reserve ring with fresh empty mbufs from seed_index up
 * to cons_index + queue_size (i.e. one full ring ahead of the
 * consumer), and record each new buffer's IOVA in paddress_q for the
 * HW.  Fills at most up to the physical end of the ring; the wrapped
 * remainder is filled on the next call.  Returns 0 on success, -1 if
 * the mempool is exhausted and recovery also fails.
 */
static inline int
eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
{
	uint32_t limit = queue->cons_index + queue->queue_size;
	uint32_t seed_index = queue->seed_index;

	uint32_t count = 0;
	uint32_t seed_m = queue->seed_index & queue->queue_mask;

	uint32_t nb = limit - seed_index;

	/* Handle wrap around -- remainder is filled on the next call */
	if (unlikely(seed_m + nb > queue->queue_size))
		nb = queue->queue_size - seed_m;

	struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
	int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);

	if (unlikely(status != 0)) {
		/* Try to recover from lack of mbufs in pool */
		status = eth_ark_rx_seed_recovery(queue, &nb, mbufs);
		if (unlikely(status != 0)) {
			return -1;
		}
	}

	/* Debug build: poison each buffer and stamp it with its seed
	 * index and queue id so misrouted DMA is visible in dumps.
	 */
	if (ARK_DEBUG_CORE) {		/* DEBUG */
		while (count != nb) {
			struct rte_mbuf *mbuf_init =
				queue->reserve_q[seed_m + count];

			memset(mbuf_init->buf_addr, -1, 512);
			*((uint32_t *)mbuf_init->buf_addr) =
				seed_index + count;
			*(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
				queue->phys_qid;
			count++;
		}
		count = 0;
	} /* DEBUG */
	queue->seed_index += nb;

	/* Publish each mbuf's buf_iova into the HW-visible address ring.
	 * Unrolled x4; the case labels sit INSIDE the while loop.
	 */
	/* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
	switch (nb % 4) {
	case 0:
		while (count != nb) {
			queue->paddress_q[seed_m++] =
				(*mbufs++)->buf_iova;
			count++;
		/* FALLTHROUGH */
	case 3:
		queue->paddress_q[seed_m++] =
			(*mbufs++)->buf_iova;
		count++;
		/* FALLTHROUGH */
	case 2:
		queue->paddress_q[seed_m++] =
			(*mbufs++)->buf_iova;
		count++;
		/* FALLTHROUGH */
	case 1:
		queue->paddress_q[seed_m++] =
			(*mbufs++)->buf_iova;
		count++;
		/* FALLTHROUGH */

		} /* while (count != nb) */
	} /* switch */

	return 0;
}
5132bfe3f2eSlogwang 
514d30ea906Sjfb8856606 int
eth_ark_rx_seed_recovery(struct ark_rx_queue * queue,uint32_t * pnb,struct rte_mbuf ** mbufs)515d30ea906Sjfb8856606 eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
516d30ea906Sjfb8856606 			 uint32_t *pnb,
517d30ea906Sjfb8856606 			 struct rte_mbuf **mbufs)
518d30ea906Sjfb8856606 {
519d30ea906Sjfb8856606 	int status = -1;
520d30ea906Sjfb8856606 
521d30ea906Sjfb8856606 	/* Ignore small allocation failures */
522d30ea906Sjfb8856606 	if (*pnb <= 64)
523d30ea906Sjfb8856606 		return -1;
524d30ea906Sjfb8856606 
525d30ea906Sjfb8856606 	*pnb = 64U;
526d30ea906Sjfb8856606 	status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb);
527d30ea906Sjfb8856606 	if (status != 0) {
528*2d9fd380Sjfb8856606 		ARK_PMD_LOG(NOTICE,
529d30ea906Sjfb8856606 			    "ARK: Could not allocate %u mbufs from pool for RX queue %u;"
530d30ea906Sjfb8856606 			    " %u free buffers remaining in queue\n",
531d30ea906Sjfb8856606 			    *pnb, queue->queue_index,
532d30ea906Sjfb8856606 			    queue->seed_index - queue->cons_index);
533d30ea906Sjfb8856606 	}
534d30ea906Sjfb8856606 	return status;
535d30ea906Sjfb8856606 }
536d30ea906Sjfb8856606 
5372bfe3f2eSlogwang void
eth_ark_rx_dump_queue(struct rte_eth_dev * dev,uint16_t queue_id,const char * msg)5382bfe3f2eSlogwang eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
5392bfe3f2eSlogwang 		      const char *msg)
5402bfe3f2eSlogwang {
5412bfe3f2eSlogwang 	struct ark_rx_queue *queue;
5422bfe3f2eSlogwang 
5432bfe3f2eSlogwang 	queue = dev->data->rx_queues[queue_id];
5442bfe3f2eSlogwang 
5452bfe3f2eSlogwang 	ark_ethdev_rx_dump(msg, queue);
5462bfe3f2eSlogwang }
5472bfe3f2eSlogwang 
5482bfe3f2eSlogwang /* ************************************************************************* */
5492bfe3f2eSlogwang /* Call on device closed no user API, queue is stopped */
5502bfe3f2eSlogwang void
eth_ark_dev_rx_queue_release(void * vqueue)5512bfe3f2eSlogwang eth_ark_dev_rx_queue_release(void *vqueue)
5522bfe3f2eSlogwang {
5532bfe3f2eSlogwang 	struct ark_rx_queue *queue;
5542bfe3f2eSlogwang 	uint32_t i;
5552bfe3f2eSlogwang 
5562bfe3f2eSlogwang 	queue = (struct ark_rx_queue *)vqueue;
5572bfe3f2eSlogwang 	if (queue == 0)
5582bfe3f2eSlogwang 		return;
5592bfe3f2eSlogwang 
5602bfe3f2eSlogwang 	ark_udm_queue_enable(queue->udm, 0);
5612bfe3f2eSlogwang 	/* Stop the MPU since pointer are going away */
5622bfe3f2eSlogwang 	ark_mpu_stop(queue->mpu);
5632bfe3f2eSlogwang 
5642bfe3f2eSlogwang 	/* Need to clear out mbufs here, dropping packets along the way */
5652bfe3f2eSlogwang 	eth_ark_rx_queue_drain(queue);
5662bfe3f2eSlogwang 
5672bfe3f2eSlogwang 	for (i = 0; i < queue->queue_size; ++i)
5682bfe3f2eSlogwang 		rte_pktmbuf_free(queue->reserve_q[i]);
5692bfe3f2eSlogwang 
5702bfe3f2eSlogwang 	rte_free(queue->reserve_q);
5712bfe3f2eSlogwang 	rte_free(queue->paddress_q);
5722bfe3f2eSlogwang 	rte_free(queue);
5732bfe3f2eSlogwang }
5742bfe3f2eSlogwang 
5752bfe3f2eSlogwang void
eth_rx_queue_stats_get(void * vqueue,struct rte_eth_stats * stats)5762bfe3f2eSlogwang eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
5772bfe3f2eSlogwang {
5782bfe3f2eSlogwang 	struct ark_rx_queue *queue;
5792bfe3f2eSlogwang 	struct ark_udm_t *udm;
5802bfe3f2eSlogwang 
5812bfe3f2eSlogwang 	queue = vqueue;
5822bfe3f2eSlogwang 	if (queue == 0)
5832bfe3f2eSlogwang 		return;
5842bfe3f2eSlogwang 	udm = queue->udm;
5852bfe3f2eSlogwang 
5862bfe3f2eSlogwang 	uint64_t ibytes = ark_udm_bytes(udm);
5872bfe3f2eSlogwang 	uint64_t ipackets = ark_udm_packets(udm);
5882bfe3f2eSlogwang 	uint64_t idropped = ark_udm_dropped(queue->udm);
5892bfe3f2eSlogwang 
5902bfe3f2eSlogwang 	stats->q_ipackets[queue->queue_index] = ipackets;
5912bfe3f2eSlogwang 	stats->q_ibytes[queue->queue_index] = ibytes;
5922bfe3f2eSlogwang 	stats->q_errors[queue->queue_index] = idropped;
5932bfe3f2eSlogwang 	stats->ipackets += ipackets;
5942bfe3f2eSlogwang 	stats->ibytes += ibytes;
5952bfe3f2eSlogwang 	stats->imissed += idropped;
5962bfe3f2eSlogwang }
5972bfe3f2eSlogwang 
5982bfe3f2eSlogwang void
eth_rx_queue_stats_reset(void * vqueue)5992bfe3f2eSlogwang eth_rx_queue_stats_reset(void *vqueue)
6002bfe3f2eSlogwang {
6012bfe3f2eSlogwang 	struct ark_rx_queue *queue;
6022bfe3f2eSlogwang 
6032bfe3f2eSlogwang 	queue = vqueue;
6042bfe3f2eSlogwang 	if (queue == 0)
6052bfe3f2eSlogwang 		return;
6062bfe3f2eSlogwang 
6072bfe3f2eSlogwang 	ark_mpu_reset_stats(queue->mpu);
6082bfe3f2eSlogwang 	ark_udm_queue_stats_reset(queue->udm);
6092bfe3f2eSlogwang }
6102bfe3f2eSlogwang 
/* Force the UDM to flush during shutdown.  If the UDM reports it is
 * not flushed, restart every queue's MPU and push the producer index
 * far ahead (seed + 100000) so buffered data can drain, wait briefly,
 * then reset the UDM unconditionally.
 */
void
eth_ark_udm_force_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_rx_queue *queue;
	uint32_t index;
	uint16_t i;

	if (!ark_udm_is_flushed(ark->udm.v)) {
		/* restart the MPUs */
		ARK_PMD_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
			if (queue == 0)
				continue;

			ark_mpu_start(queue->mpu);
			/* Add some buffers */
			index = 100000 + queue->seed_index;
			ark_mpu_set_producer(queue->mpu, index);
		}
		/* Wait to allow data to pass */
		usleep(100);

		ARK_PMD_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
				ark_udm_is_flushed(ark->udm.v));
	}
	ark_udm_reset(ark->udm.v);
}
6402bfe3f2eSlogwang 
/* Dump one RX queue's software indices plus the MPU and UDM hardware
 * state to the debug log.  No-op when queue is NULL.
 */
static void
ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
{
	if (queue == NULL)
		return;
	ARK_PMD_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
	ARK_PMD_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
			"queue_size", queue->queue_size,
			"seed_index", queue->seed_index,
			"prod_index", queue->prod_index,
			"cons_index", queue->cons_index);

	ark_mpu_dump(queue->mpu, name, queue->phys_qid);
	ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
	ark_udm_dump(queue->udm, name);
	ark_udm_dump_setup(queue->udm, queue->phys_qid);
}
6582bfe3f2eSlogwang 
6592bfe3f2eSlogwang /* Only used in debug.
6602bfe3f2eSlogwang  * This function is a raw memory dump of a portion of an mbuf's memory
6612bfe3f2eSlogwang  * region.  The usual function, rte_pktmbuf_dump() only shows data
6622bfe3f2eSlogwang  * with respect to the data_off field.  This function show data
6632bfe3f2eSlogwang  * anywhere in the mbuf's buffer.  This is useful for examining
6642bfe3f2eSlogwang  * data in the headroom or tailroom portion of an mbuf.
6652bfe3f2eSlogwang  */
6662bfe3f2eSlogwang static void
dump_mbuf_data(struct rte_mbuf * mbuf,uint16_t lo,uint16_t hi)6672bfe3f2eSlogwang dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
6682bfe3f2eSlogwang {
6692bfe3f2eSlogwang 	uint16_t i, j;
6702bfe3f2eSlogwang 
671*2d9fd380Sjfb8856606 	ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
672*2d9fd380Sjfb8856606 		    mbuf, mbuf->pkt_len, mbuf->data_off);
6732bfe3f2eSlogwang 	for (i = lo; i < hi; i += 16) {
6742bfe3f2eSlogwang 		uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
6752bfe3f2eSlogwang 
676*2d9fd380Sjfb8856606 		ARK_PMD_LOG(DEBUG, "  %6d:  ", i);
6772bfe3f2eSlogwang 		for (j = 0; j < 16; j++)
678*2d9fd380Sjfb8856606 			ARK_PMD_LOG(DEBUG, " %02x", dp[j]);
6792bfe3f2eSlogwang 
680*2d9fd380Sjfb8856606 		ARK_PMD_LOG(DEBUG, "\n");
6812bfe3f2eSlogwang 	}
6822bfe3f2eSlogwang }
683