/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_per_lcore.h>
#include <rte_rawdev.h>

#include "otx2_dpi_rawdev.h"

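/* Completion ring shared by all DMA requests issued by this test */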
static struct dpi_cring_data_s cring;

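/* Fill 'len' bytes at 'addr' with an incrementing pattern starting from 'val';
 * returns the next pattern value so a caller could continue the sequence.
 */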
static uint8_t
buffer_fill(uint8_t *addr, int len, uint8_t val)
{
	int j = 0;

	memset(addr, 0, len);
	for (j = 0; j < len; j++)
		*(addr + j) = val++;

	return val;
}

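/* Compare 'len' bytes of the source and destination buffers; logs the first
 * mismatch and returns -1, or 0 when the buffers are identical.
 */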
static int
validate_buffer(uint8_t *saddr, uint8_t *daddr, int len)
{
	int j = 0, ret = 0;

	for (j = 0; j < len; j++) {
		if (*(saddr + j) != *(daddr + j)) {
			otx2_dpi_dbg("FAIL: Data Integrity failed");
			otx2_dpi_dbg("index: %d, Expected: 0x%x, Actual: 0x%x",
				     j, *(saddr + j), *(daddr + j));
			ret = -1;
			break;
		}
	}

	return ret;
}

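/* Perform a single internal-only (memory-to-memory) DMA transfer of
 * 'buf_size' bytes on 'dma_port': build one read/write pointer pair,
 * enqueue it, poll for completion and verify the copied data.
 */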
static inline int
dma_test_internal(int dma_port, int buf_size)
{
	struct dpi_dma_req_compl_s *comp_data;
	struct dpi_dma_queue_ctx_s ctx = {0};
	struct rte_rawdev_buf buf = {0};
	struct rte_rawdev_buf *d_buf[1];
	struct rte_rawdev_buf *bufp[1];
	struct dpi_dma_buf_ptr_s cmd;
	union dpi_dma_ptr_u rptr = { {0} };
	union dpi_dma_ptr_u wptr = { {0} };
	uint8_t *fptr, *lptr;
	int ret;

	fptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
	lptr = (uint8_t *)rte_malloc("dummy", buf_size, 128);
	comp_data = rte_malloc("dummy", buf_size, 128);
	if (fptr == NULL || lptr == NULL || comp_data == NULL) {
		otx2_dpi_dbg("Unable to allocate internal memory");
		rte_free(lptr);
		rte_free(fptr);
		rte_free(comp_data);
		return -ENOMEM;
	}

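	/* Source buffer carries an incrementing byte pattern, destination and
	 * completion data start zeroed; the command uses a single read
	 * pointer/write pointer pair covering the whole buffer.
	 */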
	buffer_fill(fptr, buf_size, 0);
	memset(&cmd, 0, sizeof(struct dpi_dma_buf_ptr_s));
	memset(lptr, 0, buf_size);
	memset(comp_data, 0, buf_size);
	rptr.s.ptr = (uint64_t)fptr;
	rptr.s.length = buf_size;
	wptr.s.ptr = (uint64_t)lptr;
	wptr.s.length = buf_size;
	cmd.rptr[0] = &rptr;
	cmd.wptr[0] = &wptr;
	cmd.rptr_cnt = 1;
	cmd.wptr_cnt = 1;
	cmd.comp_ptr = comp_data;
	buf.buf_addr = (void *)&cmd;
	bufp[0] = &buf;

	ctx.xtype = DPI_XTYPE_INTERNAL_ONLY;
	ctx.pt = 0;
	ctx.c_ring = &cring;

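	/* Submit one command on this queue context; completion is polled
	 * below with rte_rawdev_dequeue_buffers().
	 */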
	ret = rte_rawdev_enqueue_buffers(dma_port,
					 (struct rte_rawdev_buf **)bufp, 1,
					 &ctx);
	if (ret < 0) {
		otx2_dpi_dbg("Enqueue request failed");
		rte_free(lptr);
		rte_free(fptr);
		rte_free(comp_data);
		return ret;
	}

	/* Wait and dequeue completion */
	do {
		sleep(1);
		ret = rte_rawdev_dequeue_buffers(dma_port, &d_buf[0], 1, &ctx);
		if (ret)
			break;

		otx2_dpi_dbg("Dequeue request not completed");
	} while (1);

	if (validate_buffer(fptr, lptr, buf_size)) {
		otx2_dpi_dbg("DMA transfer failed");
		rte_free(lptr);
		rte_free(fptr);
		rte_free(comp_data);
		return -EAGAIN;
	}
	otx2_dpi_dbg("Internal Only DMA transfer successfully completed");

	rte_free(lptr);
	rte_free(fptr);
	rte_free(comp_data);

	return 0;
}

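/* Create and populate the mempool handed to the DPI rawdev as its chunk pool
 * (1024 elements of 1024 bytes); returns the pool, or NULL on failure.
 */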
static void *
dpi_create_mempool(void)
{
	void *chunk_pool = NULL;
	char pool_name[25];
	int ret;

	snprintf(pool_name, sizeof(pool_name), "dpi_chunk_pool");

	chunk_pool = (void *)rte_mempool_create_empty(pool_name, 1024, 1024,
						      0, 0, rte_socket_id(), 0);
	if (chunk_pool == NULL) {
		otx2_dpi_dbg("Unable to create memory pool.");
		return NULL;
	}

	ret = rte_mempool_set_ops_byname(chunk_pool,
					 rte_mbuf_platform_mempool_ops(), NULL);
	if (ret < 0) {
		otx2_dpi_dbg("Unable to set pool ops");
		rte_mempool_free(chunk_pool);
		return NULL;
	}

	ret = rte_mempool_populate_default(chunk_pool);
	if (ret < 0) {
		otx2_dpi_dbg("Unable to populate pool");
		rte_mempool_free(chunk_pool);
		return NULL;
	}

	return chunk_pool;
}

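/* Test entry point: configure the DPI VF rawdev, set up the shared completion
 * ring and run one internal-only DMA transfer before closing the device.
 */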
int
test_otx2_dma_rawdev(uint16_t val)
{
	struct rte_rawdev_info rdev_info = {0};
	struct dpi_rawdev_conf_s conf = {0};
	int ret, i, size = 1024;
	int nb_ports;

	RTE_SET_USED(val);
	nb_ports = rte_rawdev_count();
	if (nb_ports == 0) {
		otx2_dpi_dbg("No Rawdev ports - bye");
		return -ENODEV;
	}

	i = rte_rawdev_get_dev_id("DPI:5:00.1");
	/* Configure rawdev ports */
	conf.chunk_pool = dpi_create_mempool();
	if (conf.chunk_pool == NULL)
		return -ENOMEM;
	rdev_info.dev_private = &conf;
	ret = rte_rawdev_configure(i, (rte_rawdev_obj_t)&rdev_info,
			sizeof(conf));
	if (ret) {
		otx2_dpi_dbg("Unable to configure DPIVF %d", i);
		rte_mempool_free(conf.chunk_pool);
		return -ENODEV;
	}
	otx2_dpi_dbg("rawdev %d configured successfully", i);

	/* Each stream allocates its own completion ring data and stores it in
	 * the application context. Each stream must use the same application
	 * context for enqueue/dequeue.
	 */
	cring.compl_data = rte_malloc("dummy", sizeof(void *) * 1024, 128);
	if (!cring.compl_data) {
		otx2_dpi_dbg("Completion allocation failed");
		rte_mempool_free(conf.chunk_pool);
		return -ENOMEM;
	}

	cring.max_cnt = 1024;
	cring.head = 0;
	cring.tail = 0;

	ret = dma_test_internal(i, size);
	if (ret)
		otx2_dpi_dbg("DMA transfer failed for queue %d", i);

	if (rte_rawdev_close(i))
		otx2_dpi_dbg("Dev close failed for port %d", i);

	rte_free(cring.compl_data);

	if (conf.chunk_pool)
		rte_mempool_free(conf.chunk_pool);

	return ret;
}