/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */


/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define SNCATF(buffer, buflen, ...) do {                                \
        size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);      \
        buffer += new_chars;                                            \
        /* be careful; snprintf's return value can be > buflen */      \
        buflen -= MIN(buflen, new_chars);                               \
} while (0)
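
/*
 * Illustrative usage (assuming the formatted output fits in buflen): after
 * the two calls below, buffer points just past "foo=1 bar=2" and buflen has
 * shrunk by the number of characters written:
 *
 *      SNCATF(buffer, buflen, "foo=%d ", 1);
 *      SNCATF(buffer, buflen, "bar=%d", 2);
 */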

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)

/**
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen.
 */
#define XNB_ASSERT(cond) ({                                             \
        int passed = (cond);                                            \
        char *_buffer = (buffer);                                       \
        size_t _buflen = (buflen);                                      \
        if (! passed) {                                                 \
                strlcat(_buffer, __func__, _buflen);                    \
                strlcat(_buffer, ":" TOSTRING(__LINE__)                 \
                    " Assertion Error: " #cond "\n", _buflen);          \
        }                                                               \
})
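
/*
 * For example, a failed XNB_ASSERT(num_consumed == 1) appends a line of the
 * form "xnb_ring2pkt_1req:<line> Assertion Error: num_consumed == 1" to
 * buffer; any such output is later counted as a test failure by the runner.
 */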

/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure.
 * \param buffer Return storage for error messages
 * \param buflen The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
        /** Will be run before the test to allocate and initialize variables */
        setup_t *setup;

        /** Will be run if setup succeeds */
        testcase_t *test;

        /** Cleans up test data whether or not the setup succeeded */
        teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;
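
/*
 * For example, the entry
 *      {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data}
 * in the table below allocates the fake rings and ifnet, runs one ring2pkt
 * test against them, and then frees those resources whether or not the test
 * passed.
 */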

static int      xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int      xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
                    char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

#if defined(INET) || defined(INET6)
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static void     xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
                    uint16_t ip_id, uint16_t ip_p,
                    uint16_t ip_off, uint16_t ip_sum);
static void     xnb_fill_tcp(struct mbuf *m);
#endif /* INET || INET6 */

/** Private data used by unit tests */
static struct {
        gnttab_copy_table       gnttab;
        netif_rx_back_ring_t    rxb;
        netif_rx_front_ring_t   rxf;
        netif_tx_back_ring_t    txb;
        netif_tx_front_ring_t   txf;
        struct ifnet            *ifp;
        netif_rx_sring_t        *rxs;
        netif_tx_sring_t        *txs;
} xnb_unit_pvt;

static inline void
safe_m_freem(struct mbuf **ppMbuf)
{
        if (*ppMbuf != NULL) {
                m_freem(*ppMbuf);
                *ppMbuf = NULL;
        }
}

/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests  An array of tests.  Every test will be attempted.
 * \param ntests The length of tests
 * \param buffer Return storage for the result string
 * \param buflen The length of buffer
 * \return The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
    size_t buflen)
{
        int i;
        int n_passes;
        int n_failures = 0;

        for (i = 0; i < ntests; i++) {
                int error = tests[i].setup();
                if (error != 0) {
                        SNCATF(buffer, buflen,
                            "Setup failed for test idx %d\n", i);
                        n_failures++;
                } else {
                        size_t new_chars;

                        tests[i].test(buffer, buflen);
                        new_chars = strnlen(buffer, buflen);
                        buffer += new_chars;
                        buflen -= new_chars;

                        if (new_chars > 0) {
                                n_failures++;
                        }
                }
                tests[i].teardown();
        }

        n_passes = ntests - n_failures;
        if (n_passes > 0) {
                SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
        }
        if (n_failures > 0) {
                SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
        }

        return n_failures;
}

/** Number of unit tests.  Must match the length of the tests array below */
#define TOTAL_TESTS     (53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define TOTAL_BUFLEN    (400 * TOTAL_TESTS + 2)
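
/*
 * When everything passes, the returned string is simply "53 Tests Passed\n".
 * Each failing test instead contributes its XNB_ASSERT messages (or a
 * "Setup failed for test idx %d" line) ahead of the final pass/fail tallies.
 */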

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp  unused
 * \param arg1  pointer to an xnb_softc for a specific xnb device
 * \param arg2  unused
 * \param req   sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */
static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS)
{
        test_fixture_t const tests[TOTAL_TESTS] = {
                {setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
                {null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
#endif
                {null_setup, xnb_sscanf_hhd, null_teardown},
                {null_setup, xnb_sscanf_hhu, null_teardown},
                {null_setup, xnb_sscanf_lld, null_teardown},
                {null_setup, xnb_sscanf_llu, null_teardown},
                {null_setup, xnb_sscanf_hhn, null_teardown},
        };
        /**
         * results is static so that the data will persist after this function
         * returns.  The sysctl code expects us to return a constant string.
         * \todo: the static variable is not thread safe.  Put a mutex around
         * it.
         */
        static char results[TOTAL_BUFLEN];

        /* empty the result strings */
        results[0] = 0;
        xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

        return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}
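
/*
 * A minimal way to exercise the handler above from userspace, assuming the
 * driver has registered it under its per-device sysctl tree (the exact OID
 * name is defined where netback creates its sysctl nodes, so treat this as
 * a sketch):
 *
 *      sysctl dev.xnb.0.unit_test_results
 */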

static int
setup_pvt_data(void)
{
        int error = 0;

        bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

        xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
        if (xnb_unit_pvt.txs != NULL) {
                SHARED_RING_INIT(xnb_unit_pvt.txs);
                BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
                FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
        } else {
                error = 1;
        }

        xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
        if (xnb_unit_pvt.ifp == NULL) {
                error = 1;
        }

        xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
        if (xnb_unit_pvt.rxs != NULL) {
                SHARED_RING_INIT(xnb_unit_pvt.rxs);
                BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
                FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
        } else {
                error = 1;
        }

        return error;
}

static void
teardown_pvt_data(void)
{
        if (xnb_unit_pvt.txs != NULL) {
                free(xnb_unit_pvt.txs, M_XENNETBACK);
        }
        if (xnb_unit_pvt.rxs != NULL) {
                free(xnb_unit_pvt.rxs, M_XENNETBACK);
        }
        if (xnb_unit_pvt.ifp != NULL) {
                if_free(xnb_unit_pvt.ifp);
        }
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);

        req->flags = 0;
        req->size = 69; /* arbitrary number for test */
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 1);
        XNB_ASSERT(pkt.size == 69);
        XNB_ASSERT(pkt.car_size == 69);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 40;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 2);
        XNB_ASSERT(pkt.size == 100);
        XNB_ASSERT(pkt.car_size == 60);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 2);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 200;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 40;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(pkt.size == 200);
        XNB_ASSERT(pkt.car_size == 110);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 3);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 1);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info from the ring
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        struct netif_extra_info *ext;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_extra_info | NETTXF_more_data;
        req->size = 150;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->flags = 0;
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->u.gso.size = 250;
        ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
        ext->u.gso.features = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(pkt.extra.flags == 0);
        XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
        XNB_ASSERT(pkt.extra.u.gso.size == 250);
        XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
        XNB_ASSERT(pkt.size == 150);
        XNB_ASSERT(pkt.car_size == 100);
        XNB_ASSERT(pkt.flags == NETTXF_extra_info);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 2);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 2);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 150;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 0);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        unsigned int rsize;

        /*
         * Manually tweak the ring indices to create a ring with no responses
         * and the next request slot at position 2 from the end
         */
        rsize = RING_SIZE(&xnb_unit_pvt.txf);
        xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
        xnb_unit_pvt.txf.rsp_cons = rsize - 2;
        xnb_unit_pvt.txs->req_prod = rsize - 2;
        xnb_unit_pvt.txs->req_event = rsize - 1;
        xnb_unit_pvt.txs->rsp_prod = rsize - 2;
        xnb_unit_pvt.txs->rsp_event = rsize - 1;
        xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
        xnb_unit_pvt.txb.req_cons = rsize - 2;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 550;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 3);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
        int num_consumed;
        struct xnb_pkt pkt;
        netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
        netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
        XNB_ASSERT(
            memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
        XNB_ASSERT(
            memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info;
        req->id = 69;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info | NETTXF_more_data;
        req->id = 254;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 300;
        req->flags = NETTXF_more_data;
        req->id = 1034;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 400;
        req->flags = 0;
        req->id = 34;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 2);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 3);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info;
        req->id = 69;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = 0xFF;       /* Invalid extra type */
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;
        unsigned int rsize;

        /*
         * Manually tweak the ring indices to create a ring with no responses
         * and the next request slot at position 2 from the end
         */
        rsize = RING_SIZE(&xnb_unit_pvt.txf);
        xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
        xnb_unit_pvt.txf.rsp_cons = rsize - 2;
        xnb_unit_pvt.txs->req_prod = rsize - 2;
        xnb_unit_pvt.txs->req_event = rsize - 1;
        xnb_unit_pvt.txs->rsp_prod = rsize - 2;
        xnb_unit_pvt.txs->rsp_event = rsize - 1;
        xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
        xnb_unit_pvt.txb.req_cons = rsize - 2;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 550;
        req->id = 1;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        req->id = 2;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        req->id = 3;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 2);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to setup pkt2mbufc tests
 * \param size size in bytes of the single request to push to the ring
 * \param flags optional flags to put in the netif request
 * \param[out] pkt the returned packet object
 * \return number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = flags;
        req->size = size;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
        int num_consumed;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        pkt.size = 0;
        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet whose checksum was validated by the netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, NETTXF_data_validated);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
        const size_t size = MCLBYTES + 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in two standard clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
        const size_t size = 2 * MCLBYTES + 1;
        size_t space = 0;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        struct mbuf *m;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

        for (m = pMbuf; m != NULL; m = m->m_next) {
                space += M_TRAILINGSPACE(m);
        }
        XNB_ASSERT(space >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return an empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
        pkt.size = 0;
        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
        XNB_ASSERT(n_entries == 0);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet that can fit in one mbuf internal buffer
 * and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = size;
        req->gref = 7;
        req->offset = 17;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
        XNB_ASSERT(n_entries == 1);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
        /* flags should indicate gref's for source */
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
            virt_to_mfn(mtod(pMbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 1900;
        req->gref = 7;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 500;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(n_entries == 2);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t)));

        XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
        XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t) + 1400));
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        const uint16_t data_this_transaction = (MCLBYTES * 2) + 1;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = data_this_transaction;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(pMbuf != NULL);
        if (pMbuf == NULL)
                return;

        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
                /* there should be three mbufs and three gnttab entries */
                XNB_ASSERT(n_entries == 3);
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
                    mtod(pMbuf, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

                XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

                XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
                    MCLBYTES);
        } else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
                /* there should be two mbufs and two gnttab entries */
                XNB_ASSERT(n_entries == 2);
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
                    mtod(pMbuf, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

                XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

        } else {
                /* should never get here */
                XNB_ASSERT(0);
        }
        m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = size;
        req->gref = 7;
        req->offset = 17;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update grant table's status fields as the hypervisor call would */
        xnb_unit_pvt.gnttab[0].status = GNTST_okay;

        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
        XNB_ASSERT(pMbuf->m_len == size);
        XNB_ASSERT(pMbuf->m_pkthdr.len == size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 1900;
        req->gref = 7;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 500;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update grant table's status fields as the hypervisor call would */
        xnb_unit_pvt.gnttab[0].status = GNTST_okay;
        xnb_unit_pvt.gnttab[1].status = GNTST_okay;

        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
        XNB_ASSERT(n_entries == 2);
        XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
        XNB_ASSERT(pMbuf->m_len == 1900);

        safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
        int i;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        const uint16_t data_this_transaction = (MCLBYTES * 2) + 1;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = data_this_transaction;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update grant table's status fields */
        for (i = 0; i < n_entries; i++) {
                xnb_unit_pvt.gnttab[i].status = GNTST_okay;
        }
        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

        if (n_entries == 3) {
                /* there should be three mbufs and three gnttab entries */
                XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
                XNB_ASSERT(pMbuf->m_len == MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
        } else if (n_entries == 2) {
                /* there should be two mbufs and two gnttab entries */
                XNB_ASSERT(n_entries == 2);
                XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
                XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_len == 1);
        } else {
                /* should never get here */
                XNB_ASSERT(0);
        }
        safe_m_freem(&pMbuf);
}

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int free_slots = 64;
        struct mbuf *mbuf;

        mbuf = m_get(M_WAITOK, MT_DATA);
        /*
         * note: it is illegal to set M_PKTHDR on an mbuf with no data.
         * Doing so will cause m_freem to panic.
         */
        XNB_ASSERT(mbuf->m_len == 0);

        xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size = 128;
        int free_slots = 64;
        RING_IDX start = 9;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car_size == size);
        XNB_ASSERT(! (pkt.flags &
            (NETRXF_more_data | NETRXF_extra_info)));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == start);

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size = MCLBYTES;
        int free_slots = 32;
        RING_IDX start = 12;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car_size == size);
        XNB_ASSERT(! (pkt.flags &
            (NETRXF_more_data | NETRXF_extra_info)));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == start);

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size1 = MHLEN - 5;
        size_t size2 = MHLEN - 15;
        int free_slots = 32;
        RING_IDX start = 14;
        struct mbuf *mbufc, *mbufc2;

        mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc2 != NULL);
        if (mbufc2 == NULL) {
                safe_m_freem(&mbufc);
                return;
        }
        mbufc2->m_pkthdr.len = size1 + size2;
        mbufc2->m_len = size1;

        xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size1 + size2);
        XNB_ASSERT(pkt.car == start);
        /*
         * The second m_getm may allocate a new mbuf and append
         * it to the chain, or it may simply extend the first mbuf.
         */
        if (mbufc2->m_next != NULL) {
                XNB_ASSERT(pkt.car_size == size1);
                XNB_ASSERT(pkt.list_len == 1);
                XNB_ASSERT(pkt.cdr == start + 1);
        }

        safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 15;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc->m_pkthdr.len = size;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car == start);
        XNB_ASSERT(pkt.car_size == mbufc->m_len);
        /*
         * There should be >1 response in the packet, and there is no
         * extra info.
         */
        XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
        XNB_ASSERT(pkt.cdr == pkt.car + 1);

        safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 15;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;

        mbufc->m_flags |= M_PKTHDR;
        mbufc->m_pkthdr.len = size;
        mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
        mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car == start);
        XNB_ASSERT(pkt.car_size == mbufc->m_len);
        /* There should be >1 response in the packet, and there is extra info */
        XNB_ASSERT(pkt.flags & NETRXF_extra_info);
        XNB_ASSERT(pkt.flags & NETRXF_data_validated);
        XNB_ASSERT(pkt.cdr == pkt.car + 2);
        XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
        XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
        XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

        safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 2;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;
        int error;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc->m_pkthdr.len = size;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(error == EAGAIN);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2gnttab on an empty packet.  Should return an empty gnttab
 */
static void
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries;
        int free_slots = 60;
        struct mbuf *mbuf;

        mbuf = m_get(M_WAITOK, MT_DATA);

        xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(nr_entries == 0);

        safe_m_freem(&mbuf);
}

/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries;
        size_t size = 128;
        int free_slots = 60;
        RING_IDX start = 9;
        struct netif_rx_request *req;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
            xnb_unit_pvt.rxf.req_prod_pvt);
        req->gref = 7;

        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(nr_entries == 1);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
        /* flags should indicate gref's for dest */
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
            mtod(mbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
            virt_to_mfn(mtod(mbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

        safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
 */
static void
xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int i, num_mbufs;
	size_t total_granted_size = 0;
	size_t size = MJUMPAGESIZE + 1;
	int free_slots = 60;
	RING_IDX start = 11;
	struct netif_rx_request *req;
	struct mbuf *mbuf, *m;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);

	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
		    xnb_unit_pvt.rxf.req_prod_pvt);
		req->gref = i;
		req->id = 5;
	}
	num_mbufs = i;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries >= num_mbufs);
	for (i = 0; i < nr_entries; i++) {
		int end_offset = xnb_unit_pvt.gnttab[i].len +
			xnb_unit_pvt.gnttab[i].dest.offset;
		XNB_ASSERT(end_offset <= PAGE_SIZE);
		total_granted_size += xnb_unit_pvt.gnttab[i].len;
	}
	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
}
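
/*
 * The page-boundary reasoning behind the loop above: a gnttab_copy operation
 * may not cross a page boundary, so a buffer of MJUMPAGESIZE + 1 bytes (one
 * byte more than a page on typical 4 KB configurations) must be split into
 * at least two entries even when it lives in a single mbuf.  That is why
 * nr_entries may exceed the number of mbufs, and why every entry's
 * dest.offset + len is asserted to stay within PAGE_SIZE.
 */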

/**
 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
 */
static void
xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int nr_reqs;
	int free_slots = 60;
	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);
	XNB_ASSERT(nr_reqs == 0);
	XNB_ASSERT(
	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);

	safe_m_freem(&mbuf);
}
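
/*
 * The snapshot-and-compare idiom above is worth noting: the test copies both
 * the back ring bookkeeping structure and the shared ring itself before the
 * call, then memcmp()s them afterward to prove that an empty packet leaves
 * every producer/consumer index and every ring slot untouched.
 */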

/**
 * xnb_rxpkt2rsp on a short packet with no extras
 */
static void
xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 5;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = 7;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
	XNB_ASSERT(rsp->status == size);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp with extra data
 */
static void
xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = 14;
	int free_slots = 15;
	RING_IDX start = 3;
	uint16_t id = 49;
	uint16_t gref = 65;
	uint16_t mss = TCP_MSS - 40;
	struct mbuf *mbufc;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct netif_extra_info *ext;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = mss;
	mbufc->m_len = size;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->id = id;
	req->gref = gref;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->id = id + 1;
	req->gref = gref + 1;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
	XNB_ASSERT(rsp->status == size);

	ext = (struct netif_extra_info*)
	    RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
	XNB_ASSERT(ext->u.gso.size == mss);
	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);

	safe_m_freem(&mbufc);
}
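
/*
 * A note on the extra info slot above: netif_extra_info shares ring slots
 * with netif_rx_response, which is why the test casts the slot at start + 1.
 * The extra slot consumes a request even though it carries no packet data,
 * so a 14-byte TSO packet still produces nr_reqs == 2.
 */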

/**
 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
 * generate two response slots.
 */
static void
xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = PAGE_SIZE + 100;
	int free_slots = 3;
	uint16_t id1 = 17;
	uint16_t id2 = 37;
	uint16_t gref1 = 24;
	uint16_t gref2 = 34;
	RING_IDX start = 15;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	if (mbuf->m_next != NULL) {
		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
		mbuf->m_len = first_len;
		mbuf->m_next->m_len = size - first_len;
	} else {
		mbuf->m_len = size;
	}

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref1;
	req->id = id1;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = gref2;
	req->id = id2;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id1);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(rsp->flags & NETRXF_more_data);
	XNB_ASSERT(rsp->status == PAGE_SIZE);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(rsp->id == id2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
	XNB_ASSERT(rsp->status == size - PAGE_SIZE);

	safe_m_freem(&mbuf);
}
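
/*
 * The expected statuses above follow directly from the copy granularity:
 * each response's status field reports the bytes landed in that request's
 * page, so a PAGE_SIZE + 100 byte packet fills the first page completely
 * (status == PAGE_SIZE) and spills the remaining 100 bytes into the second
 * (status == size - PAGE_SIZE), with NETRXF_more_data set only on the first.
 */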

/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
static void
xnb_rxpkt2rsp_2short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_reqs, nr_entries;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	uint16_t id = 47;
	uint16_t gref = 54;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbufc;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc->m_next != NULL);
	mbufc->m_pkthdr.len = size1 + size2;
	mbufc->m_len = size1;
	mbufc->m_next->m_len = size2;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);

	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_entries == 2);
	XNB_ASSERT(nr_reqs == 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == size1 + size2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));

	safe_m_freem(&mbufc);
}
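
/*
 * This is the inverse of the 2slots case: two grant-table entries (one per
 * source mbuf) both target the same request's page, the second landing at
 * the destination offset where the first left off, so only one response
 * slot is consumed and its status is the combined size1 + size2.
 */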

/**
 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
 */
static void
xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	int id = 7;
	int gref = 42;
	uint16_t canary = 6859;
	size_t size = 7 * MCLBYTES;
	int free_slots = 9;
	RING_IDX start = 2;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = canary;
	req->id = canary;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* Inject the error */
	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	XNB_ASSERT(req->gref == canary);
	XNB_ASSERT(req->id == canary);

	safe_m_freem(&mbuf);
}
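
/*
 * The canary idiom above deserves a word: a recognizable value (6859) is
 * written into the request slot after the failing packet so the test can
 * prove that error handling collapses the whole packet into a single
 * NETIF_RSP_ERROR response without consuming or scribbling on any request
 * beyond its own.
 */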

#if defined(INET) || defined(INET6)
/**
 * xnb_add_mbuf_cksum on an ARP request packet
 */
static void
xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
{
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ether_arp);
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ether_arp *ep;
	unsigned char pkt_orig[pkt_len];

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example arp request */
	eh = mtod(mbufc, struct ether_header*);
	eh->ether_dhost[0] = 0xff;
	eh->ether_dhost[1] = 0xff;
	eh->ether_dhost[2] = 0xff;
	eh->ether_dhost[3] = 0xff;
	eh->ether_dhost[4] = 0xff;
	eh->ether_dhost[5] = 0xff;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x15;
	eh->ether_shost[2] = 0x17;
	eh->ether_shost[3] = 0xe9;
	eh->ether_shost[4] = 0x30;
	eh->ether_shost[5] = 0x68;
	eh->ether_type = htons(ETHERTYPE_ARP);
	ep = (struct ether_arp*)(eh + 1);
	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	ep->ea_hdr.ar_hln = 6;
	ep->ea_hdr.ar_pln = 4;
	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
	ep->arp_sha[0] = 0x00;
	ep->arp_sha[1] = 0x15;
	ep->arp_sha[2] = 0x17;
	ep->arp_sha[3] = 0xe9;
	ep->arp_sha[4] = 0x30;
	ep->arp_sha[5] = 0x68;
	ep->arp_spa[0] = 0xc0;
	ep->arp_spa[1] = 0xa8;
	ep->arp_spa[2] = 0x0a;
	ep->arp_spa[3] = 0x04;
	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
	ep->arp_tpa[0] = 0xc0;
	ep->arp_tpa[1] = 0xa8;
	ep->arp_tpa[2] = 0x0a;
	ep->arp_tpa[3] = 0x06;

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the packet */
	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Verify that the packet's data did not change */
	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
	m_freem(mbufc);
}

/**
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough
 */
static void
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
{
	struct ether_header *eh;
	struct ip *iph;

	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;		/* IP v4 */
	iph->ip_tos = 0;
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_ttl = 64;
	iph->ip_p = ip_p;
	iph->ip_sum = htons(ip_sum);
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);
}
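
/*
 * For example, the ICMP test below calls
 *	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
 * which yields a 20-byte IPv4 header claiming 84 bytes total length, IP id
 * 28, no fragmentation, and a zero checksum placeholder for
 * xnb_add_mbuf_cksum to fill in.  All multi-byte arguments are passed in
 * host order; the helper applies htons() itself.
 */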

/**
 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
 * ICMP packet
 */
static void
xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
{
	const size_t icmp_len = 64; /* set by ping(1) */
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + icmp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct icmp *icmph;
	unsigned char pkt_orig[icmp_len];
	uint32_t *tv_field;
	uint8_t *data_payload;
	int i;
	const uint16_t ICMP_CSUM = 0xaed7;
	const uint16_t IP_CSUM = 0xe533;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ICMP ping request */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
	iph = (struct ip*)(eh + 1);
	icmph = (struct icmp*)(iph + 1);
	icmph->icmp_type = ICMP_ECHO;
	icmph->icmp_code = 0;
	icmph->icmp_cksum = htons(ICMP_CSUM);
	icmph->icmp_id = htons(31492);
	icmph->icmp_seq = htons(0);
	/*
	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we will set the bytes individually for portability.
	 */
	tv_field = (uint32_t*)(&(icmph->icmp_hun));
	tv_field[0] = 0x4f02cfac;
	tv_field[1] = 0x0007c46a;
	/*
	 * The remainder of the packet is an incrementing 8-bit integer,
	 * starting with 8
	 */
	data_payload = (uint8_t*)(&tv_field[2]);
	for (i = 8; i < 37; i++) {
		*data_payload++ = i;
	}

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the ICMP message */
	bcopy(icmph, pkt_orig, icmp_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the IP checksum */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));

	/* Check that the ICMP message did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
	m_freem(mbufc);
}
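
/*
 * The checksum constants above were presumably lifted from the tcpdump
 * capture the test is based on: ICMP_CSUM is the sum ping(1) computed (which
 * must survive untouched, since ICMP checksums are not hardware-offloaded),
 * while IP_CSUM is the header checksum the function under test is expected
 * to write over the zero placeholder left by xnb_fill_eh_and_ip.
 */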

/**
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
 * UDP packet
 */
static void
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
{
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + udp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct udphdr *udp;
	uint8_t *data_payload;
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad); /* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));

	m_freem(mbufc);
}
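
/*
 * Worth noting for the UDP case: unlike the IP header checksum, the UDP
 * checksum also covers a pseudo-header built from the source and destination
 * addresses, the protocol, and the UDP length.  The 0xbaad values seeded
 * into ip_sum and uh_sum are deliberately wrong, so the asserts prove that
 * xnb_add_mbuf_cksum recomputed both rather than passing them through.
 */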

/**
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
 * large enough
 */
static void
xnb_fill_tcp(struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	uint32_t *options;
	uint8_t *data_payload;

	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	tcp->th_x2 = 0;
	tcp->th_off = 8;	/* 8 dwords: 20-byte header + 12 bytes of options */
	tcp->th_flags = 0x18;	/* PSH | ACK */
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
}

/**
 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
 * packet
 */
static void
xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	const uint16_t IP_CSUM = 0xa55a;
	const uint16_t TCP_CSUM = 0x2f64;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}

/**
 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
 */
static void
xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	/*
	 * Use deliberately bad checksums, and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum
	 */
	const uint16_t IP_CSUM = 0xdead;
	const uint16_t TCP_CSUM = 0xbeef;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	iph->ip_sum = htons(IP_CSUM);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sum = htons(TCP_CSUM);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront does not use hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = 0;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check that the checksums didn't change */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}
#endif /* INET || INET6 */
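
/*
 * The sscanf tests that follow may look out of place in a netback test file;
 * they presumably exist because the driver relies on kernel sscanf
 * conversions (e.g. to parse the MAC address advertised in the XenStore),
 * and several of the length-modified formats were added to the kernel's
 * scanner for its benefit.
 */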

/**
 * sscanf on unsigned chars
 */
static void
xnb_sscanf_hhu(char *buffer, size_t buflen)
{
	const char mystr[] = "137";
	uint8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
}

/**
 * sscanf on signed chars
 */
static void
xnb_sscanf_hhd(char *buffer, size_t buflen)
{
	const char mystr[] = "-27";
	int8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
}

/**
 * sscanf on signed long longs
 */
static void
xnb_sscanf_lld(char *buffer, size_t buflen)
{
	const char mystr[] = "-123456789012345"; /* about -2**47 */
	long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    -123456789012345));
}

/**
 * sscanf on unsigned long longs
 */
static void
xnb_sscanf_llu(char *buffer, size_t buflen)
{
	const char mystr[] = "12802747070103273189";
	unsigned long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = 0xdeadbeefdeadbeefull;

	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? 0xdeadbeefdeadbeefull :
		    12802747070103273189ull));
}

/**
 * sscanf with %hhn (store the number of characters consumed into an
 * unsigned char)
 */
static void
xnb_sscanf_hhn(char *buffer, size_t buflen)
{
	const char mystr[] =
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
	unsigned char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = (unsigned char)'X';

	XNB_ASSERT(sscanf(mystr,
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
}
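
/*
 * The expected values above work out as follows: the format string consumes
 * two full 64-character runs plus a 32-character run, i.e. 160 characters,
 * so %hhn stores 160 in dest[4].  Since %hhn performs no conversion of its
 * own, sscanf's return value (the count of successful conversions) is 0.
 */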
2537