/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_dmadev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_dmadev_pmd.h>

#include "test.h"
#include "test_dmadev_api.h"
#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)

#define COPY_LEN 1024

static struct rte_mempool *pool;
static uint16_t id_count;

static void
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
	va_list ap;

	fprintf(stderr, "In %s:%d - ", func, lineno);
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}

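/*
 * Run the given test function for a number of iterations, checking the
 * device stats after each iteration: every submitted op must be reported
 * as completed and, when check_err_stats is set, no errors may be counted.
 */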
static int
runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations,
		int16_t dev_id, uint16_t vchan, bool check_err_stats)
{
	struct rte_dma_stats stats;
	int i;

	rte_dma_stats_reset(dev_id, vchan);
	printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
			check_err_stats ? " " : "(errors expected)");
	for (i = 0; i < iterations; i++) {
		if (test_fn(dev_id, vchan) < 0)
			return -1;

		rte_dma_stats_get(dev_id, 0, &stats);
		printf("Ops submitted: %"PRIu64"\t", stats.submitted);
		printf("Ops completed: %"PRIu64"\t", stats.completed);
		printf("Errors: %"PRIu64"\r", stats.errors);

		if (stats.completed != stats.submitted)
			ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
		if (check_err_stats && stats.errors != 0)
			ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
	}
	printf("\n");
	return 0;
}

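/*
 * Wait for in-flight ops to finish: poll the vchan status (for up to one
 * second) if the driver supports status queries, otherwise sleep for 1 ms.
 */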
static void
await_hw(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;

	if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
		/* for drivers that don't support this op, just sleep for 1 millisecond */
		rte_delay_us_sleep(1000);
		return;
	}

	/* for those that do, *max* end time is one second from now, but all should be faster */
	const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
	while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
		rte_pause();
		rte_dma_vchan_status(dev_id, vchan, &st);
	}
}

/* run a series of copy tests just using some different options for enqueues and completions */
static int
do_multi_copies(int16_t dev_id, uint16_t vchan,
		int split_batches,     /* submit 2 x 16 or 1 x 32 burst */
		int split_completions, /* gather 2 x 16 or 1 x 32 completions */
		int use_completed_status) /* use completed or completed_status function */
{
	struct rte_mbuf *srcs[32], *dsts[32];
	enum rte_dma_status_code sc[32];
	unsigned int i, j;
	bool dma_err = false;

	/* Enqueue burst of copies and hit doorbell */
	for (i = 0; i < RTE_DIM(srcs); i++) {
		uint64_t *src_data;

		if (split_batches && i == RTE_DIM(srcs) / 2)
			rte_dma_submit(dev_id, vchan);

		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
		if (srcs[i] == NULL || dsts[i] == NULL)
			ERR_RETURN("Error allocating buffers\n");

		src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
		for (j = 0; j < COPY_LEN / sizeof(uint64_t); j++)
			src_data[j] = rte_rand();

		if (rte_dma_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off,
				dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
	}
	rte_dma_submit(dev_id, vchan);

	await_hw(dev_id, vchan);

	if (split_completions) {
		/* gather completions in two halves */
		uint16_t half_len = RTE_DIM(srcs) / 2;
		int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);

		ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);
	} else {
		/* gather all completions in one go, using either
		 * completed or completed_status fns
		 */
		if (!use_completed_status) {
			int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
			if (n != RTE_DIM(srcs) || dma_err)
				ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
						n, RTE_DIM(srcs), dma_err);
		} else {
			int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
			if (n != RTE_DIM(srcs))
				ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
						n, RTE_DIM(srcs));

			for (j = 0; j < (uint16_t)n; j++)
				if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
					ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
							j, sc[j]);
		}
	}

	/* check for empty */
	int ret = use_completed_status ?
			rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
			rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
	if (ret != 0)
		ERR_RETURN("Error with completion check - ops unexpectedly returned\n");

	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data, *dst_data;

		src_data = rte_pktmbuf_mtod(srcs[i], char *);
		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
		for (j = 0; j < COPY_LEN; j++)
			if (src_data[j] != dst_data[j])
				ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);

		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

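/*
 * Copy tests: a single submitted copy, then a batch of identical copies,
 * then the batching/completion-gathering permutations of do_multi_copies().
 */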
static int
test_enqueue_copies(int16_t dev_id, uint16_t vchan)
{
	unsigned int i;
	uint16_t id;

	/* test doing a single copy */
	do {
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
				COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
		if (id != id_count)
			ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
					id, id_count);

		/* give time for copy to finish, then check it was done */
		await_hw(dev_id, vchan);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
						dst_data[i], src_data[i]);

		/* now check completion works */
		if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
			ERR_RETURN("Error with rte_dma_completed\n");

		if (id != id_count)
			ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
					id, id_count);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);

		/* now check completion returns nothing more */
		if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
			ERR_RETURN("Error with rte_dma_completed in empty check\n");

		id_count++;

	} while (0);

	/* test doing multiple single copies */
	do {
		const uint16_t max_ops = 4;
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		uint16_t count;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		/* perform the same copy <max_ops> times */
		for (i = 0; i < max_ops; i++)
			if (rte_dma_copy(dev_id, vchan,
					rte_pktmbuf_iova(src),
					rte_pktmbuf_iova(dst),
					COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
				ERR_RETURN("Error with rte_dma_copy\n");

		await_hw(dev_id, vchan);

		count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
		if (count != max_ops)
			ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
					count, max_ops);

		if (id != id_count - 1)
			ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
					id, id_count - 1);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u\n", i);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);
	} while (0);

	/* test doing multiple copies */
	return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
			/* enqueue 2 batches and then complete both */
			|| do_multi_copies(dev_id, vchan, 1, 0, 0)
			/* enqueue 1 batch, then complete in two halves */
			|| do_multi_copies(dev_id, vchan, 0, 1, 0)
			/* test using completed_status in place of regular completed API */
			|| do_multi_copies(dev_id, vchan, 0, 0, 1);
}

/* Failure handling test cases - global macros and variables for those tests */
#define COMP_BURST_SZ 16
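/* when fencing is enabled, insert a fence on the op with index 8 in each burst */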
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)

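/*
 * Enqueue a full burst in which the op at fail_idx has an invalid (NULL)
 * source address, then check that completed() stops at the failed op, that
 * completed_status() returns the remaining statuses, and that the
 * completed/errors stats match expectations.
 */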
static int
test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test single full batch statuses with failures */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	struct rte_dma_stats baseline, stats;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count;
	unsigned int i;
	bool error = false;
	int err_count = 0;

	rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
	for (i = 0; i < COMP_BURST_SZ; i++) {
		int id = rte_dma_copy(dev_id, vchan,
				(i == fail_idx ? 0 : (srcs[i]->buf_iova + srcs[i]->data_off)),
				dsts[i]->buf_iova + dsts[i]->data_off,
				COPY_LEN, OPT_FENCE(i));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
		if (i == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
		ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.submitted, baseline.submitted + COMP_BURST_SZ);

	await_hw(dev_id, vchan);

	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
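	/* the last-completed index should be the op just before the failed one */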
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* all checks ok, now verify calling completed() again always returns 0 */
	for (i = 0; i < 10; i++)
		if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
				|| error == false || idx != (invalid_addr_id - 1))
			ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
					fail_idx);

	status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
			&idx, status);
	/* Some HW may stop on error and need restarting after the error status
	 * of a single op has been read. To handle this case, if we get just one
	 * status back, wait for more completions and then gather the statuses
	 * for the rest of the burst.
	 */
	if (status_count == 1) {
		await_hw(dev_id, vchan);
		status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
				&idx, &status[1]);
	}
	/* check that at this point we have all status values */
	if (status_count != COMP_BURST_SZ - count)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	/* now verify just one failure followed by multiple successful or skipped entries */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);
	for (i = 1; i < status_count; i++)
		/* after a failure in a burst, depending on ordering/fencing,
		 * operations may be successful or skipped because of previous error.
		 */
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
					fail_idx, count + i, COMP_BURST_SZ);

	/* check the completed + errors stats are as expected */
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.completed != baseline.completed + COMP_BURST_SZ)
		ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.completed, baseline.completed + COMP_BURST_SZ);
	for (i = 0; i < status_count; i++)
		err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
	if (stats.errors != baseline.errors + err_count)
		ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.errors, baseline.errors + err_count);

	return 0;
}

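/*
 * Same failure scenario as above, but gather completions one at a time:
 * completed() until the error is hit, then completed_status() op by op
 * until the end of the burst.
 */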
static int
test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test gathering batch statuses one at a time */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count = 0, status_count = 0;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, OPT_FENCE(j));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* use regular "completed" until we hit error */
	while (!error) {
		uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
		count += n;
		if (n > 1 || count >= COMP_BURST_SZ)
			ERR_RETURN("Error - too many completions received\n");
		if (n == 0 && !error)
			ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
					count);
	}
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
				idx, invalid_addr_id - 1);

	/* use completed_status until we hit end of burst */
	while (count + status_count < COMP_BURST_SZ) {
		uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
				&status[status_count]);
		await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
		status_count += n;
		if (n != 1)
			ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
					n);
	}

	/* check for single failure */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error, unexpected successful DMA transaction\n");
	for (j = 1; j < status_count; j++)
		if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error, unexpected DMA error reported\n");

	return 0;
}

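/*
 * Same scenario again, but collect only the single failed op via
 * completed_status() before returning to plain completed() calls for the
 * remainder of the burst.
 */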
static int
test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* When an error occurs, collect just the single failed op using
	 * completed_status() before going back to completed() calls
	 */
	enum rte_dma_status_code status;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count, count2;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)),
				dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* get up to the error point */
	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* get the error code */
	status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
	if (status_count != 1)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	if (status == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);

	/* delay in case time is needed, after the error is handled, for the
	 * remaining jobs to complete
	 */
	await_hw(dev_id, vchan);

	/* get the rest of the completions without status */
	count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (error == true)
		ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
				fail_idx);
	if (count + status_count + count2 != COMP_BURST_SZ)
		ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
				count + status_count + count2, COMP_BURST_SZ);

	return 0;
}

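/*
 * Enqueue a burst containing multiple invalid ops and verify the number of
 * failures reported: first gathering all statuses in bulk with
 * completed_status(), then draining the errors one at a time.
 */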
static int
test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
		const unsigned int *fail, size_t num_fail)
{
	/* test having multiple errors in one go */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	unsigned int i, j;
	uint16_t count, err_count = 0;
	bool error = false;

	/* enqueue and gather completions in one go */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
	while (count < COMP_BURST_SZ) {
		await_hw(dev_id, vchan);

		uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
				NULL, &status[count]);
		if (ret == 0)
			ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
					count, COMP_BURST_SZ);
		count += ret;
	}
	for (i = 0; i < count; i++)
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			err_count++;

	if (err_count != num_fail)
		ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
				err_count, num_fail);

	/* enqueue and gather completions in bursts, but getting errors one at a time */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off;
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan,
				src, dsts[j]->buf_iova + dsts[j]->data_off,
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = 0;
	err_count = 0;
	while (count + err_count < COMP_BURST_SZ) {
		count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
		if (error) {
			uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
					NULL, status);
			if (ret != 1)
				ERR_RETURN("Error getting error-status for completions\n");
			err_count += ret;
			await_hw(dev_id, vchan);
		}
	}
	if (err_count != num_fail)
		ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
				err_count, num_fail);

	return 0;
}

static int
test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
{
	const unsigned int fail[] = {0, 7, 14, 15};
	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
	unsigned int i;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
	}

	for (i = 0; i < RTE_DIM(fail); i++) {
		if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
			return -1;

		if (test_individual_status_query_with_failure(dev_id, vchan, fence,
				srcs, dsts, fail[i]) < 0)
			return -1;

		/* this test runs the same whether fenced or unfenced, but there
		 * is no harm in running it twice
		 */
		if (test_single_item_status_query_with_failure(dev_id, vchan,
				srcs, dsts, fail[i]) < 0)
			return -1;
	}

	if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
		return -1;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

static int
test_completion_handling(int16_t dev_id, uint16_t vchan)
{
	return test_completion_status(dev_id, vchan, false) /* without fences */
			|| test_completion_status(dev_id, vchan, true); /* with fences */
}

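/*
 * Fill tests: fill a buffer with a 64-bit pattern at various lengths and
 * verify both the filled bytes and that nothing beyond the requested
 * length was written.
 */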
static int
test_enqueue_fill(int16_t dev_id, uint16_t vchan)
{
	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
	struct rte_mbuf *dst;
	char *dst_data;
	uint64_t pattern = 0xfedcba9876543210;
	unsigned int i, j;

	dst = rte_pktmbuf_alloc(pool);
	if (dst == NULL)
		ERR_RETURN("Failed to allocate mbuf\n");
	dst_data = rte_pktmbuf_mtod(dst, char *);

	for (i = 0; i < RTE_DIM(lengths); i++) {
		/* reset dst_data */
		memset(dst_data, 0, rte_pktmbuf_data_len(dst));

		/* perform the fill operation */
		int id = rte_dma_fill(dev_id, vchan, pattern,
				rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_fill\n");
		await_hw(dev_id, vchan);

		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
			ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
		/* check the data from the fill operation is correct */
		for (j = 0; j < lengths[i]; j++) {
			char pat_byte = ((char *)&pattern)[j % 8];
			if (dst_data[j] != pat_byte)
				ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], pat_byte);
		}
		/* check that the data after the fill operation was not written to */
		for (; j < rte_pktmbuf_data_len(dst); j++)
			if (dst_data[j] != 0)
				ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], 0);
	}

	rte_pktmbuf_free(dst);
	return 0;
}

static int
test_burst_capacity(int16_t dev_id, uint16_t vchan)
{
#define CAP_TEST_BURST_SIZE 64
	const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
	struct rte_mbuf *src, *dst;
	int i, j, iter;
	int cap, ret;
	bool dma_err;

	src = rte_pktmbuf_alloc(pool);
	dst = rte_pktmbuf_alloc(pool);

	/* to test capacity, we enqueue elements and check capacity is reduced
	 * by one each time - rebaselining the expected value after each burst
	 * as the capacity is only for a burst. We enqueue multiple bursts to
	 * fill up half the ring, before emptying it again. We do this multiple
	 * times to ensure that we get to test scenarios where we get ring
	 * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
	 */
	for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			cap = rte_dma_burst_capacity(dev_id, vchan);

			for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
				ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
						rte_pktmbuf_iova(dst), COPY_LEN, 0);
				if (ret < 0)
					ERR_RETURN("Error with rte_dma_copy\n");

				if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
					ERR_RETURN("Error, ring capacity did not change as expected\n");
			}
			if (rte_dma_submit(dev_id, vchan) < 0)
				ERR_RETURN("Error, failed to submit burst\n");

			if (cap < rte_dma_burst_capacity(dev_id, vchan))
				ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
		}
		await_hw(dev_id, vchan);

		for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
			ret = rte_dma_completed(dev_id, vchan,
					CAP_TEST_BURST_SIZE, NULL, &dma_err);
			if (ret != CAP_TEST_BURST_SIZE || dma_err) {
				enum rte_dma_status_code status;

				rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
				ERR_RETURN("Error with rte_dma_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
						ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
			}
		}
		cap = rte_dma_burst_capacity(dev_id, vchan);
		if (cap != ring_space)
			ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
					cap, ring_space);
	}

	rte_pktmbuf_free(src);
	rte_pktmbuf_free(dst);

	return 0;
}

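/*
 * Per-device test runner: configure and start the device with one
 * mem-to-mem vchan, run the copy, burst-capacity, error-handling and fill
 * suites against it, then stop the device and release resources.
 */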
static int
test_dmadev_instance(int16_t dev_id)
{
#define TEST_RINGSIZE 512
#define CHECK_ERRS true
	struct rte_dma_stats stats;
	struct rte_dma_info info;
	const struct rte_dma_conf conf = { .nb_vchans = 1 };
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = TEST_RINGSIZE,
	};
	const int vchan = 0;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0)
		ERR_RETURN("Error with rte_dma_info_get()\n");

	printf("\n### Test dmadev instance %u [%s]\n",
			dev_id, info.dev_name);

	if (info.max_vchans < 1)
		ERR_RETURN("Error, no channels available on device id %u\n", dev_id);

	if (rte_dma_configure(dev_id, &conf) != 0)
		ERR_RETURN("Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
		ERR_RETURN("Error with queue configuration\n");

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0 || info.nb_vchans != 1)
		ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);

	if (rte_dma_start(dev_id) != 0)
		ERR_RETURN("Error with rte_dma_start()\n");

	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
		ERR_RETURN("Error with rte_dma_stats_get()\n");

	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests\n");

	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
		ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
				"submitted = %"PRIu64", errors = %"PRIu64"\n",
				stats.completed, stats.submitted, stats.errors);
	id_count = 0;

	/* create a mempool for running tests */
	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
			TEST_RINGSIZE * 2, /* n == num elements */
			32,   /* cache size */
			0,    /* priv size */
			2048, /* data room size */
			info.numa_node);
	if (pool == NULL)
		ERR_RETURN("Error with mempool creation\n");

	/* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */
	if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* run some burst capacity tests */
	if (rte_dma_burst_capacity(dev_id, vchan) < 64)
		printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
				dev_id);
	else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* to test error handling we can provide null pointers for source or dest in copies. This
	 * requires VA mode in DPDK, since NULL(0) is a valid physical address.
	 * We also need hardware that can report errors back.
	 */
	if (rte_eal_iova_mode() != RTE_IOVA_VA)
		printf("DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n", dev_id);
	else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
		printf("DMA Dev %u: device does not report errors, skipping error handling tests\n",
				dev_id);
	else if (runtest("error handling", test_completion_handling, 1,
			dev_id, vchan, !CHECK_ERRS) < 0)
		goto err;

	if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0)
		printf("DMA Dev %u: No device fill support, skipping fill tests\n", dev_id);
	else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	rte_dma_stats_reset(dev_id, vchan);
	return 0;

err:
	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	return -1;
}


static int
test_apis(void)
{
	const char *pmd = "dma_skeleton";
	int id;
	int ret;

	/* attempt to create skeleton instance - ignore errors due to one being already present */
	rte_vdev_init(pmd, NULL);
	id = rte_dma_get_dev_id_by_name(pmd);
	if (id < 0)
		return TEST_SKIPPED;
	printf("\n### Test dmadev infrastructure using skeleton driver\n");
	ret = test_dma_api(id);

	return ret;
}

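/* entry point: run the API sanity tests, then the full suite on each device */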
static int
test_dma(void)
{
	int i;

	/* basic sanity on dmadev infrastructure */
	if (test_apis() < 0)
		ERR_RETURN("Error performing API tests\n");

	if (rte_dma_count_avail() == 0)
		return TEST_SKIPPED;

	RTE_DMA_FOREACH_DEV(i)
		if (test_dmadev_instance(i) < 0)
			ERR_RETURN("Error, test failure for device %d\n", i);

	return 0;
}

REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);