/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <unistd.h>
#include <inttypes.h>
#include <rte_mbuf.h>
#include "rte_rawdev.h"
#include "rte_ioat_rawdev.h"
#include "ioat_private.h"

#define MAX_SUPPORTED_RAWDEVS 64
#define TEST_SKIPPED 77
#define COPY_LEN 1024

int ioat_rawdev_test(uint16_t dev_id); /* forward declaration to keep the compiler happy */

static struct rte_mempool *pool;
static unsigned short expected_ring_size[MAX_SUPPORTED_RAWDEVS];

#define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)

static inline int
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
	va_list ap;
	int ret;

	ret = fprintf(stderr, "In %s:%d - ", func, lineno);
	va_start(ap, format);
	ret += vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}

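/* enqueue a set of copies and verify the returned handles and copied data.
 * If split_batches is set, the copies are submitted as two separate bursts;
 * if split_completions is set, the completions are gathered in two halves.
 */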
static int
do_multi_copies(int dev_id, int split_batches, int split_completions)
{
	struct rte_mbuf *srcs[32], *dsts[32];
	struct rte_mbuf *completed_src[64];
	struct rte_mbuf *completed_dst[64];
	unsigned int i, j;

	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data;

		if (split_batches && i == RTE_DIM(srcs) / 2)
			rte_ioat_perform_ops(dev_id);

		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(srcs[i], char *);

		for (j = 0; j < COPY_LEN; j++)
			src_data[j] = rand() & 0xFF;

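		/* the mbuf pointers double as the job handles; they are handed
		 * back to us by rte_ioat_completed_ops() on completion
		 */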
		if (rte_ioat_enqueue_copy(dev_id,
				srcs[i]->buf_iova + srcs[i]->data_off,
				dsts[i]->buf_iova + dsts[i]->data_off,
				COPY_LEN,
				(uintptr_t)srcs[i],
				(uintptr_t)dsts[i]) != 1) {
			PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
					i);
			return -1;
		}
	}
	rte_ioat_perform_ops(dev_id);
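	/* allow time for the hardware to finish the copies */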
	usleep(100);

	if (split_completions) {
		/* gather completions in two halves */
		uint16_t half_len = RTE_DIM(srcs) / 2;
		if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
				(void *)completed_src,
				(void *)completed_dst) != half_len) {
			PRINT_ERR("Error with rte_ioat_completed_ops - first half request\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (rte_ioat_completed_ops(dev_id, half_len, NULL, NULL,
				(void *)&completed_src[half_len],
				(void *)&completed_dst[half_len]) != half_len) {
			PRINT_ERR("Error with rte_ioat_completed_ops - second half request\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
	} else {
		/* gather all completions in one go */
		if (rte_ioat_completed_ops(dev_id, RTE_DIM(completed_src), NULL, NULL,
				(void *)completed_src,
				(void *)completed_dst) != RTE_DIM(srcs)) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
	}
	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data, *dst_data;

		if (completed_src[i] != srcs[i]) {
			PRINT_ERR("Error with source pointer %u\n", i);
			return -1;
		}
		if (completed_dst[i] != dsts[i]) {
			PRINT_ERR("Error with dest pointer %u\n", i);
			return -1;
		}

		src_data = rte_pktmbuf_mtod(srcs[i], char *);
		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
		for (j = 0; j < COPY_LEN; j++)
			if (src_data[j] != dst_data[j]) {
				PRINT_ERR("Error with copy of packet %u, byte %u\n",
						i, j);
				return -1;
			}
		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

static int
test_enqueue_copies(int dev_id)
{
	unsigned int i;

	/* test doing a single copy */
	do {
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		struct rte_mbuf *completed[2] = {0};

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rand() & 0xFF;

		if (rte_ioat_enqueue_copy(dev_id,
				src->buf_iova + src->data_off,
				dst->buf_iova + dst->data_off,
				COPY_LEN,
				(uintptr_t)src,
				(uintptr_t)dst) != 1) {
			PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
			return -1;
		}
		rte_ioat_perform_ops(dev_id);
		usleep(10);

		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
				(void *)&completed[1]) != 1) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			return -1;
		}
		if (completed[0] != src || completed[1] != dst) {
			PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
					completed[0], completed[1], src, dst);
			return -1;
		}

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i]) {
				PRINT_ERR("Data mismatch at char %u [Got %02x not %02x]\n",
						i, dst_data[i], src_data[i]);
				return -1;
			}
		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);

		/* check ring is now empty */
		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
				(void *)&completed[1]) != 0) {
			PRINT_ERR("Error: got unexpected returned handles from rte_ioat_completed_ops\n");
			return -1;
		}
	} while (0);

	/* test doing multiple single copies */
	do {
		const uint16_t max_ops = 4;
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		struct rte_mbuf *completed[32] = {0};
		const uint16_t max_completions = RTE_DIM(completed) / 2;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rand() & 0xFF;

		/* perform the same copy <max_ops> times */
		for (i = 0; i < max_ops; i++) {
			if (rte_ioat_enqueue_copy(dev_id,
					src->buf_iova + src->data_off,
					dst->buf_iova + dst->data_off,
					COPY_LEN,
					(uintptr_t)src,
					(uintptr_t)dst) != 1) {
				PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
				return -1;
			}
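			/* submit immediately, making each copy its own batch */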
			rte_ioat_perform_ops(dev_id);
		}
		usleep(10);

		if (rte_ioat_completed_ops(dev_id, max_completions, NULL, NULL,
				(void *)&completed[0],
				(void *)&completed[max_completions]) != max_ops) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (completed[0] != src || completed[max_completions] != dst) {
			PRINT_ERR("Error with completions: got (%p, %p), not (%p,%p)\n",
					completed[0], completed[max_completions], src, dst);
			return -1;
		}

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i]) {
				PRINT_ERR("Data mismatch at char %u\n", i);
				return -1;
			}
		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);
	} while (0);

	/* test doing multiple copies */
	if (do_multi_copies(dev_id, 0, 0) != 0) /* enqueue and complete one batch at a time */
		return -1;
	if (do_multi_copies(dev_id, 1, 0) != 0) /* enqueue 2 batches and then complete both */
		return -1;
	if (do_multi_copies(dev_id, 0, 1) != 0) /* enqueue 1 batch, then complete in two halves */
		return -1;
	return 0;
}

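/* run fill operations of various lengths and verify that the destination
 * buffer contains the repeating 8-byte fill pattern afterwards
 */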
static int
test_enqueue_fill(int dev_id)
{
	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
	struct rte_mbuf *dst = rte_pktmbuf_alloc(pool);
	char *dst_data = rte_pktmbuf_mtod(dst, char *);
	struct rte_mbuf *completed[2] = {0};
	uint64_t pattern = 0xfedcba9876543210;
	unsigned int i, j;

	for (i = 0; i < RTE_DIM(lengths); i++) {
		/* reset dst_data */
		memset(dst_data, 0, lengths[i]);

		/* perform the fill operation */
		if (rte_ioat_enqueue_fill(dev_id, pattern,
				dst->buf_iova + dst->data_off, lengths[i],
				(uintptr_t)dst) != 1) {
			PRINT_ERR("Error with rte_ioat_enqueue_fill\n");
			return -1;
		}

		rte_ioat_perform_ops(dev_id);
		usleep(100);

		if (rte_ioat_completed_ops(dev_id, 1, NULL, NULL, (void *)&completed[0],
			(void *)&completed[1]) != 1) {
			PRINT_ERR("Error with completed ops\n");
			return -1;
		}
		/* check the result */
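		/* the 8-byte pattern repeats through the buffer, so byte j
		 * must equal pattern byte (j % 8)
		 */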
		for (j = 0; j < lengths[i]; j++) {
			char pat_byte = ((char *)&pattern)[j % 8];
			if (dst_data[j] != pat_byte) {
				PRINT_ERR("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], pat_byte);
				return -1;
			}
		}
	}

	rte_pktmbuf_free(dst);
	return 0;
}

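/* check that rte_ioat_burst_capacity() falls as descriptors are enqueued and
 * returns to its original value once the ring is drained, including across a
 * ring wrap-around
 */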
static int
test_burst_capacity(int dev_id)
{
#define BURST_SIZE			64
	const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
	struct rte_mbuf *src, *dst;
	unsigned int length = 1024;
	unsigned int i, j, iter;
	unsigned int old_cap, cap;
	uintptr_t completions[BURST_SIZE];

	src = rte_pktmbuf_alloc(pool);
	dst = rte_pktmbuf_alloc(pool);

	old_cap = ring_space;
	/* to test capacity, we enqueue elements and check that the reported
	 * capacity falls by one for each enqueue, re-baselining the expected
	 * value after each burst since the capacity guarantee only holds
	 * within a burst. We enqueue enough bursts to fill half the ring,
	 * then drain it again, and do all this twice so that we also test
	 * the ring wrap-around case.
	 */
	for (iter = 0; iter < 2; iter++) {
		for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
			cap = rte_ioat_burst_capacity(dev_id);
			if (cap > old_cap) {
				PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
				return -1;
			}
			old_cap = cap;

			for (j = 0; j < BURST_SIZE; j++) {
				if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
						rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
					PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
					return -1;
				}
				if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
					PRINT_ERR("Error, ring capacity did not change as expected\n");
					return -1;
				}
			}
			rte_ioat_perform_ops(dev_id);
		}
		usleep(100);
		for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
			if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
					NULL, NULL,
					completions, completions) != BURST_SIZE) {
				PRINT_ERR("Error with completions\n");
				return -1;
			}
		}
		if (rte_ioat_burst_capacity(dev_id) != ring_space) {
			PRINT_ERR("Error, ring capacity has not reset to original value\n");
			return -1;
		}
		old_cap = ring_space;
	}

	rte_pktmbuf_free(src);
	rte_pktmbuf_free(dst);

	return 0;
}

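/* check the per-operation status reporting of rte_ioat_completed_ops() by
 * deliberately failing some copies, and verify that a fence causes all
 * descriptors after a failure to be skipped
 */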
static int
test_completion_status(int dev_id)
{
#define COMP_BURST_SZ	16
	const unsigned int fail_copy[] = {0, 7, 15};
	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
	struct rte_mbuf *completed_src[COMP_BURST_SZ * 2];
	struct rte_mbuf *completed_dst[COMP_BURST_SZ * 2];
	unsigned int length = 1024;
	unsigned int i;
	uint8_t not_ok = 0;

	/* Test single full batch statuses */
	for (i = 0; i < RTE_DIM(fail_copy); i++) {
		uint32_t status[COMP_BURST_SZ] = {0};
		unsigned int j;

		for (j = 0; j < COMP_BURST_SZ; j++) {
			srcs[j] = rte_pktmbuf_alloc(pool);
			dsts[j] = rte_pktmbuf_alloc(pool);

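			/* use a NULL source address for one copy in the burst
			 * to force a hardware error for that descriptor
			 */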
			if (rte_ioat_enqueue_copy(dev_id,
					(j == fail_copy[i] ? (phys_addr_t)NULL :
							(srcs[j]->buf_iova + srcs[j]->data_off)),
					dsts[j]->buf_iova + dsts[j]->data_off,
					length,
					(uintptr_t)srcs[j],
					(uintptr_t)dsts[j]) != 1) {
				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
				return -1;
			}
		}
		rte_ioat_perform_ops(dev_id);
		usleep(100);

		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, &not_ok,
				(void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS) {
			unsigned int j;
			PRINT_ERR("Error, missing expected failed copy, %u\n", fail_copy[i]);
			for (j = 0; j < COMP_BURST_SZ; j++)
				printf("%u ", status[j]);
			printf("<-- Statuses\n");
			return -1;
		}
		for (j = 0; j < COMP_BURST_SZ; j++) {
			rte_pktmbuf_free(completed_src[j]);
			rte_pktmbuf_free(completed_dst[j]);
		}
	}

	/* Test gathering status for two batches at once */
	for (i = 0; i < RTE_DIM(fail_copy); i++) {
		uint32_t status[COMP_BURST_SZ] = {0};
		unsigned int batch, j;
		unsigned int expected_failures = 0;

		for (batch = 0; batch < 2; batch++) {
			for (j = 0; j < COMP_BURST_SZ/2; j++) {
				srcs[j] = rte_pktmbuf_alloc(pool);
				dsts[j] = rte_pktmbuf_alloc(pool);

				if (j == fail_copy[i])
					expected_failures++;
				if (rte_ioat_enqueue_copy(dev_id,
						(j == fail_copy[i] ? (phys_addr_t)NULL :
							(srcs[j]->buf_iova + srcs[j]->data_off)),
						dsts[j]->buf_iova + dsts[j]->data_off,
						length,
						(uintptr_t)srcs[j],
						(uintptr_t)dsts[j]) != 1) {
					PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n",
							j);
					return -1;
				}
			}
			rte_ioat_perform_ops(dev_id);
		}
		usleep(100);

		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ, status, &not_ok,
				(void *)completed_src, (void *)completed_dst) != COMP_BURST_SZ) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (not_ok != expected_failures) {
			unsigned int j;
			PRINT_ERR("Error, missing expected failed copy, got %u, not %u\n",
					not_ok, expected_failures);
			for (j = 0; j < COMP_BURST_SZ; j++)
				printf("%u ", status[j]);
			printf("<-- Statuses\n");
			return -1;
		}
		for (j = 0; j < COMP_BURST_SZ; j++) {
			rte_pktmbuf_free(completed_src[j]);
			rte_pktmbuf_free(completed_dst[j]);
		}
	}

	/* Test gathering status for half batch at a time */
	for (i = 0; i < RTE_DIM(fail_copy); i++) {
		uint32_t status[COMP_BURST_SZ] = {0};
		unsigned int j;

		for (j = 0; j < COMP_BURST_SZ; j++) {
			srcs[j] = rte_pktmbuf_alloc(pool);
			dsts[j] = rte_pktmbuf_alloc(pool);

			if (rte_ioat_enqueue_copy(dev_id,
					(j == fail_copy[i] ? (phys_addr_t)NULL :
							(srcs[j]->buf_iova + srcs[j]->data_off)),
					dsts[j]->buf_iova + dsts[j]->data_off,
					length,
					(uintptr_t)srcs[j],
					(uintptr_t)dsts[j]) != 1) {
				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
				return -1;
			}
		}
		rte_ioat_perform_ops(dev_id);
		usleep(100);

		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, &not_ok,
				(void *)completed_src,
				(void *)completed_dst) != (COMP_BURST_SZ / 2)) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (fail_copy[i] < COMP_BURST_SZ / 2 &&
				(not_ok != 1 || status[fail_copy[i]] == RTE_IOAT_OP_SUCCESS)) {
			PRINT_ERR("Missing expected failure in first half-batch\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (rte_ioat_completed_ops(dev_id, COMP_BURST_SZ / 2, status, &not_ok,
				(void *)&completed_src[COMP_BURST_SZ / 2],
				(void *)&completed_dst[COMP_BURST_SZ / 2]) != (COMP_BURST_SZ / 2)) {
			PRINT_ERR("Error with rte_ioat_completed_ops\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}
		if (fail_copy[i] >= COMP_BURST_SZ / 2 && (not_ok != 1 ||
				status[fail_copy[i] - (COMP_BURST_SZ / 2)]
					== RTE_IOAT_OP_SUCCESS)) {
			PRINT_ERR("Missing expected failure in second half-batch\n");
			rte_rawdev_dump(dev_id, stdout);
			return -1;
		}

		for (j = 0; j < COMP_BURST_SZ; j++) {
			rte_pktmbuf_free(completed_src[j]);
			rte_pktmbuf_free(completed_dst[j]);
		}
	}

	/* Test gathering statuses with fence */
	for (i = 1; i < RTE_DIM(fail_copy); i++) {
		uint32_t status[COMP_BURST_SZ * 2] = {0};
		unsigned int j;
		uint16_t count;

		for (j = 0; j < COMP_BURST_SZ; j++) {
			srcs[j] = rte_pktmbuf_alloc(pool);
			dsts[j] = rte_pktmbuf_alloc(pool);

			/* always fail the first copy */
			if (rte_ioat_enqueue_copy(dev_id,
					(j == 0 ? (phys_addr_t)NULL :
						(srcs[j]->buf_iova + srcs[j]->data_off)),
					dsts[j]->buf_iova + dsts[j]->data_off,
					length,
					(uintptr_t)srcs[j],
					(uintptr_t)dsts[j]) != 1) {
				PRINT_ERR("Error with rte_ioat_enqueue_copy for buffer %u\n", j);
				return -1;
			}
			/* put in a fence which will stop any further transactions
			 * because we had a previous failure.
			 */
			if (j == fail_copy[i])
				rte_ioat_fence(dev_id);
		}
		rte_ioat_perform_ops(dev_id);
		usleep(100);

		count = rte_ioat_completed_ops(dev_id, COMP_BURST_SZ * 2, status, &not_ok,
				(void *)completed_src, (void *)completed_dst);
		if (count != COMP_BURST_SZ) {
			PRINT_ERR("Error with rte_ioat_completed_ops, got %u not %u\n",
					count, COMP_BURST_SZ);
			for (j = 0; j < count; j++)
				printf("%u ", status[j]);
			printf("<-- Statuses\n");
			return -1;
		}
		if (not_ok != COMP_BURST_SZ - fail_copy[i]) {
			PRINT_ERR("Unexpected failed copy count, got %u, expected %u\n",
					not_ok, COMP_BURST_SZ - fail_copy[i]);
			for (j = 0; j < COMP_BURST_SZ; j++)
				printf("%u ", status[j]);
			printf("<-- Statuses\n");
			return -1;
		}
		if (status[0] == RTE_IOAT_OP_SUCCESS || status[0] == RTE_IOAT_OP_SKIPPED) {
			PRINT_ERR("Error, op 0 unexpectedly did not fail.\n");
			return -1;
		}
		for (j = 1; j <= fail_copy[i]; j++) {
			if (status[j] != RTE_IOAT_OP_SUCCESS) {
				PRINT_ERR("Error, op %u unexpectedly failed\n", j);
				return -1;
			}
		}
		for (j = fail_copy[i] + 1; j < COMP_BURST_SZ; j++) {
			if (status[j] != RTE_IOAT_OP_SKIPPED) {
				PRINT_ERR("Error, all descriptors after fence should be invalid\n");
				return -1;
			}
		}
		for (j = 0; j < COMP_BURST_SZ; j++) {
			rte_pktmbuf_free(completed_src[j]);
			rte_pktmbuf_free(completed_dst[j]);
		}
	}

	return 0;
}

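/* self-test entry point, run for each ioat/idxd rawdev via the rawdev
 * selftest mechanism (rte_rawdev_selftest())
 */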
int
ioat_rawdev_test(uint16_t dev_id)
{
#define IOAT_TEST_RINGSIZE 512
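	/* dev_private is cast below only to read the device-type field, which
	 * is shared between the ioat and idxd private structures
	 */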
	const struct rte_idxd_rawdev *idxd =
			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
	const enum rte_ioat_dev_type ioat_type = idxd->type;
	struct rte_ioat_rawdev_config p = { .ring_size = -1 };
	struct rte_rawdev_info info = { .dev_private = &p };
	struct rte_rawdev_xstats_name *snames = NULL;
	uint64_t *stats = NULL;
	unsigned int *ids = NULL;
	unsigned int nb_xstats;
	unsigned int i;

	if (dev_id >= MAX_SUPPORTED_RAWDEVS) {
		printf("Skipping test. Cannot test rawdevs with IDs of %d or above\n",
				MAX_SUPPORTED_RAWDEVS);
		return TEST_SKIPPED;
	}

	rte_rawdev_info_get(dev_id, &info, sizeof(p));
	if (p.ring_size != expected_ring_size[dev_id]) {
		PRINT_ERR("Error, initial ring size is not as expected (Actual: %d, Expected: %d)\n",
				(int)p.ring_size, expected_ring_size[dev_id]);
		return -1;
	}

	p.ring_size = IOAT_TEST_RINGSIZE;
	if (rte_rawdev_configure(dev_id, &info, sizeof(p)) != 0) {
		PRINT_ERR("Error with rte_rawdev_configure()\n");
		return -1;
	}
	rte_rawdev_info_get(dev_id, &info, sizeof(p));
	if (p.ring_size != IOAT_TEST_RINGSIZE) {
		PRINT_ERR("Error, ring size is not %d (%d)\n",
				IOAT_TEST_RINGSIZE, (int)p.ring_size);
		return -1;
	}
	expected_ring_size[dev_id] = p.ring_size;

	if (rte_rawdev_start(dev_id) != 0) {
		PRINT_ERR("Error with rte_rawdev_start()\n");
		return -1;
	}

	pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
			p.ring_size * 2, /* n == num elements */
			32,  /* cache size */
			0,   /* priv size */
			2048, /* data room size */
			info.socket_id);
	if (pool == NULL) {
		PRINT_ERR("Error with mempool creation\n");
		return -1;
	}

	/* allocate memory for xstats names and values */
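	/* a NULL array and zero size gives the number of xstats available */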
	nb_xstats = rte_rawdev_xstats_names_get(dev_id, NULL, 0);

	snames = malloc(sizeof(*snames) * nb_xstats);
	if (snames == NULL) {
		PRINT_ERR("Error allocating xstat names memory\n");
		goto err;
	}
	rte_rawdev_xstats_names_get(dev_id, snames, nb_xstats);

	ids = malloc(sizeof(*ids) * nb_xstats);
	if (ids == NULL) {
		PRINT_ERR("Error allocating xstat ids memory\n");
		goto err;
	}
	for (i = 0; i < nb_xstats; i++)
		ids[i] = i;

	stats = malloc(sizeof(*stats) * nb_xstats);
	if (stats == NULL) {
		PRINT_ERR("Error allocating xstat memory\n");
		goto err;
	}

	/* run the test cases */
	printf("Running Copy Tests\n");
	for (i = 0; i < 100; i++) {
		unsigned int j;

		if (test_enqueue_copies(dev_id) != 0)
			goto err;

		rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
		for (j = 0; j < nb_xstats; j++)
			printf("%s: %"PRIu64"   ", snames[j].name, stats[j]);
		printf("\r");
	}
	printf("\n");

	/* test enqueue fill operation */
	printf("Running Fill Tests\n");
	for (i = 0; i < 100; i++) {
		unsigned int j;

		if (test_enqueue_fill(dev_id) != 0)
			goto err;

		rte_rawdev_xstats_get(dev_id, ids, stats, nb_xstats);
		for (j = 0; j < nb_xstats; j++)
			printf("%s: %"PRIu64"   ", snames[j].name, stats[j]);
		printf("\r");
	}
	printf("\n");

	printf("Running Burst Capacity Test\n");
	if (test_burst_capacity(dev_id) != 0)
		goto err;

	/* only DSA devices report address errors, and we can only use null pointers
	 * to generate those errors when DPDK is in VA mode.
	 */
	if (rte_eal_iova_mode() == RTE_IOVA_VA && ioat_type == RTE_IDXD_DEV) {
		printf("Running Completions Status Test\n");
		if (test_completion_status(dev_id) != 0)
			goto err;
	}

	rte_rawdev_stop(dev_id);
	if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
		PRINT_ERR("Error resetting xstat values\n");
		goto err;
	}

	rte_mempool_free(pool);
	free(snames);
	free(stats);
	free(ids);
	return 0;

err:
	rte_rawdev_stop(dev_id);
	rte_rawdev_xstats_reset(dev_id, NULL, 0);
	rte_mempool_free(pool);
	free(snames);
	free(stats);
	free(ids);
	return -1;
}