/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
 *          Alan Somers (Spectra Logic Corporation)
 *          John Suykerbuyk (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */

/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define	SNCATF(buffer, buflen, ...) do {				\
	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
	buffer += new_chars;						\
	/* be careful; snprintf's return value can be > buflen */	\
	buflen -= MIN(buflen, new_chars);				\
} while (0)
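/*
 * Illustrative use of SNCATF (a sketch only; "buf", "cursor", and "space"
 * are hypothetical locals, not part of the driver):
 *
 *	char buf[80];
 *	char *cursor = buf;
 *	size_t space = sizeof(buf);
 *	SNCATF(cursor, space, "%d tests run\n", 5);
 *	SNCATF(cursor, space, "%d tests failed\n", 1);
 *
 * Each invocation advances cursor past the characters just written and
 * shrinks space (clamped at zero), so successive calls degrade to no-ops
 * once the buffer fills instead of overflowing it.
 */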

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define	STRINGIFY(x) #x
#define	TOSTRING(x) STRINGIFY(x)

/**
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen.
 */
#define	XNB_ASSERT(cond) ({						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	if (! passed) {							\
		strlcat(_buffer, __func__, _buflen);			\
		strlcat(_buffer, ":" TOSTRING(__LINE__)			\
		    " Assertion Error: " #cond "\n", _buflen);		\
	}								\
})
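/*
 * Because XNB_ASSERT expands references to the names "buffer" and "buflen",
 * it can only be used inside a function with the testcase_t signature
 * declared below.  Illustrative sketch (my_testcase is hypothetical):
 *
 *	static void
 *	my_testcase(char *buffer, size_t buflen)
 *	{
 *		XNB_ASSERT(1 + 1 == 2);	// passes; appends nothing
 *		XNB_ASSERT(1 + 1 == 3);	// fails; appends
 *			// "my_testcase:<line> Assertion Error: 1 + 1 == 3"
 *	}
 */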

/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure.
 * \param buffer Return storage for error messages
 * \param buflen The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
	/** Will be run before the test to allocate and initialize variables */
	setup_t *setup;

	/** Will be run if setup succeeds */
	testcase_t *test;

	/** Cleans up test data whether or not the setup succeeded */
	teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;
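/*
 * A fixture is built with one positional initializer per test; the tests[]
 * array in xnb_unit_test_main() below is the canonical example.  A
 * stand-alone sketch:
 *
 *	test_fixture_t fixture =
 *	    {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data};
 */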

static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
		    char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

#if defined(INET) || defined(INET6)
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
		    uint16_t ip_id, uint16_t ip_p,
		    uint16_t ip_off, uint16_t ip_sum);
static void	xnb_fill_tcp(struct mbuf *m);
#endif /* INET || INET6 */

/** Private data used by unit tests */
static struct {
	gnttab_copy_table	gnttab;
	netif_rx_back_ring_t	rxb;
	netif_rx_front_ring_t	rxf;
	netif_tx_back_ring_t	txb;
	netif_tx_front_ring_t	txf;
	struct ifnet*		ifp;
	netif_rx_sring_t*	rxs;
	netif_tx_sring_t*	txs;
} xnb_unit_pvt;

static inline void safe_m_freem(struct mbuf **ppMbuf) {
	if (*ppMbuf != NULL) {
		m_freem(*ppMbuf);
		*ppMbuf = NULL;
	}
}
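/*
 * Freeing through safe_m_freem() nulls the caller's pointer, so a repeated
 * free (e.g. from both a test body and its teardown) is a harmless no-op:
 *
 *	struct mbuf *m = m_get(M_WAITOK, MT_DATA);
 *	safe_m_freem(&m);	// frees the chain; m is now NULL
 *	safe_m_freem(&m);	// no-op
 */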

/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests	 An array of tests.  Every test will be attempted.
 * \param ntests The length of tests
 * \param buffer Return storage for the result string
 * \param buflen The length of buffer
 * \return The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
    size_t buflen)
{
	int i;
	int n_passes;
	int n_failures = 0;

	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
		if (error != 0) {
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
			n_failures++;
		} else {
			size_t new_chars;

			tests[i].test(buffer, buflen);
			new_chars = strnlen(buffer, buflen);
			buffer += new_chars;
			buflen -= new_chars;

			if (new_chars > 0) {
				n_failures++;
			}
		}
		tests[i].teardown();
	}

	n_passes = ntests - n_failures;
	if (n_passes > 0) {
		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	}
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
	}

	return n_failures;
}

/** Number of unit tests.  Must match the length of the tests array below */
#define	TOTAL_TESTS	(53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)
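/*
 * (Sanity check on the arithmetic, not from the original sources: a failed
 * XNB_ASSERT appends "func:line Assertion Error: cond\n", typically well
 * under 80 characters, so 400 bytes per test covers roughly five such
 * lines; the extra 2 bytes presumably cover the terminating NUL.)
 */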

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp unused
 * \param arg1 pointer to an xnb_softc for a specific xnb device
 * \param arg2 unused
 * \param req sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */

static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
	test_fixture_t const tests[TOTAL_TESTS] = {
		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
#endif
		{null_setup, xnb_sscanf_hhd, null_teardown},
		{null_setup, xnb_sscanf_hhu, null_teardown},
		{null_setup, xnb_sscanf_lld, null_teardown},
		{null_setup, xnb_sscanf_llu, null_teardown},
		{null_setup, xnb_sscanf_hhn, null_teardown},
	};
	/**
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	 * it.
	 */
	static char results[TOTAL_BUFLEN];

	/* empty the result strings */
	results[0] = 0;
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}
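/*
 * The driver is expected to attach this handler with SYSCTL_ADD_PROC();
 * the OID name in this sketch is hypothetical, but the shape of the call
 * would be:
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "unit_test",
 *	    CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_unit_test_main,
 *	    "A", "run xnb unit tests");
 *
 * after which something like `sysctl dev.xnb.0.unit_test` would print the
 * results string.
 */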

static int
setup_pvt_data(void)
{
	int error = 0;

	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
	} else {
		error = 1;
	}

	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {
		error = 1;
	}

	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
	} else {
		error = 1;
	}

	return error;
}

static void
teardown_pvt_data(void)
{
	if (xnb_unit_pvt.txs != NULL) {
		free(xnb_unit_pvt.txs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.rxs != NULL) {
		free(xnb_unit_pvt.rxs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.ifp != NULL) {
		if_free(xnb_unit_pvt.ifp);
	}
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);

	req->flags = 0;
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 1);
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
}
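/*
 * Most tests below follow the produce/consume idiom shown above: fill in
 * one or more netif_tx_request slots through the front ring's private
 * producer index (txf.req_prod_pvt), publish them with
 * RING_PUSH_REQUESTS(), and only then let the code under test consume
 * them through the back ring (txb).
 */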

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	XNB_ASSERT(pkt.size == 100);
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 200;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.size == 200);
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_extra_info *ext;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->flags = 0;
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->u.gso.size = 250;
	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
	ext->u.gso.features = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.extra.flags == 0);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(pkt.extra.u.gso.size == 250);
	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
	XNB_ASSERT(pkt.size == 150);
	XNB_ASSERT(pkt.car_size == 100);
	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 2);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}
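/*
 * The wrap works because Xen ring indices are free-running counters;
 * RING_GET_REQUEST() and RING_GET_RESPONSE() mask the index with
 * (ring size - 1), so producing at "rsize - 2 + 2" lands back at slot 0.
 */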

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	XNB_ASSERT(
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->id = 254;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 300;
	req->flags = NETTXF_more_data;
	req->id = 1034;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 400;
	req->flags = 0;
	req->id = 34;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = 0xFF;	/* Invalid extra type */
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	req->id = 1;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	req->id = 2;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	req->id = 3;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to setup pkt2mbufc tests
 * \param size size in bytes of the single request to push to the ring
 * \param flags optional flags to put in the netif request
 * \param[out] pkt the returned packet object
 * \return number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = flags;
	req->size = size;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, NETTXF_data_validated);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
	const size_t size = MCLBYTES + 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in two clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
	const size_t size = 2 * MCLBYTES + 1;
	size_t space = 0;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	struct mbuf *m;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

	for (m = pMbuf; m != NULL; m = m->m_next) {
		space += M_TRAILINGSPACE(m);
	}
	XNB_ASSERT(space >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
 * and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
	    virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t)));

	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(pMbuf != NULL);
	if (pMbuf == NULL)
		return;

	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(n_entries == 3);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		    mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
		    MCLBYTES);
	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		    mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);

	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
	int i;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields */
	for (i = 0; i < n_entries; i++) {
		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
	}
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

	if (n_entries == 3) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
	} else if (n_entries == 2) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == 1);
	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	safe_m_freem(&pMbuf);
}

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int free_slots = 64;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);
	/*
	 * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing
	 * so will cause m_freem to segfault
	 */
	XNB_ASSERT(mbuf->m_len == 0);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 128;
	int free_slots = 64;
	RING_IDX start = 9;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	    (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = MCLBYTES;
	int free_slots = 32;
	RING_IDX start = 12;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	    (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct mbuf *mbufc, *mbufc2;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc2 != NULL);
	if (mbufc2 == NULL) {
		safe_m_freem(&mbufc);
		return;
	}
	mbufc2->m_pkthdr.len = size1 + size2;
	mbufc2->m_len = size1;

	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size1 + size2);
	XNB_ASSERT(pkt.car == start);
	/*
	 * The second m_getm may allocate a new mbuf and append
	 * it to the chain, or it may simply extend the first mbuf.
	 */
	if (mbufc2->m_next != NULL) {
		XNB_ASSERT(pkt.car_size == size1);
		XNB_ASSERT(pkt.list_len == 1);
		XNB_ASSERT(pkt.cdr == start + 1);
	}

	safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/*
	 * There should be >1 response in the packet, and there is no
	 * extra info.
	 */
	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
	XNB_ASSERT(pkt.cdr == pkt.car + 1);

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/* There should be >1 response in the packet, there is extra info */
	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
	XNB_ASSERT(pkt.cdr == pkt.car + 2);
	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 2;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;
	int error;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(error == EAGAIN);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int free_slots = 60;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 0);

	safe_m_freem(&mbuf);
}

/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int nr_entries;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 9;
	struct netif_rx_request *req;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
	    xnb_unit_pvt.rxf.req_prod_pvt);
	req->gref = 7;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for dest */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
	    mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
	    virt_to_mfn(mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
 */
static void
xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int i, num_mbufs;
	size_t total_granted_size = 0;
	size_t size = MJUMPAGESIZE + 1;
	int free_slots = 60;
	RING_IDX start = 11;
	struct netif_rx_request *req;
	struct mbuf *mbuf, *m;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);

	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1716 xnb_unit_pvt.txf.req_prod_pvt);
1717 req->gref = i;
1718 req->id = 5;
1719 }
1720 num_mbufs = i;
1721
1722 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1723 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1724
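	/*
	 * An mbuf that crosses a page boundary needs more than one grant
	 * entry, so there may be more entries than mbufs in the chain, but
	 * no single entry may cross a page boundary.
	 */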
	XNB_ASSERT(nr_entries >= num_mbufs);
	for (i = 0; i < nr_entries; i++) {
		int end_offset = xnb_unit_pvt.gnttab[i].len +
			xnb_unit_pvt.gnttab[i].dest.offset;
		XNB_ASSERT(end_offset <= PAGE_SIZE);
		total_granted_size += xnb_unit_pvt.gnttab[i].len;
	}
	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp on an empty packet.  Should not generate any responses
 */
static void
xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int nr_reqs;
	int free_slots = 60;
	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);
	XNB_ASSERT(nr_reqs == 0);
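	/* Both the private ring state and the shared ring must be untouched. */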
	XNB_ASSERT(
	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp on a short packet with no extras
 */
static void
xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 5;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = 7;
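	/* Make the shared ring look as if the frontend posted one request. */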
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
	XNB_ASSERT(rsp->status == size);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp with extra data
 */
static void
xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = 14;
	int free_slots = 15;
	RING_IDX start = 3;
	uint16_t id = 49;
	uint16_t gref = 65;
	uint16_t mss = TCP_MSS - 40;
	struct mbuf *mbufc;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct netif_extra_info *ext;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = mss;
	mbufc->m_len = size;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->id = id;
	req->gref = gref;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->id = id + 1;
	req->gref = gref + 1;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

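	/* Expect one response slot for the data and one for the GSO info. */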
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
	XNB_ASSERT(rsp->status == size);

	ext = (struct netif_extra_info*)
	    RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
	XNB_ASSERT(ext->u.gso.size == mss);
	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
 * generate two response slots
 */
static void
xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = PAGE_SIZE + 100;
	int free_slots = 3;
	uint16_t id1 = 17;
	uint16_t id2 = 37;
	uint16_t gref1 = 24;
	uint16_t gref2 = 34;
	RING_IDX start = 15;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	if (mbuf->m_next != NULL) {
		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
		mbuf->m_len = first_len;
		mbuf->m_next->m_len = size - first_len;

	} else {
		mbuf->m_len = size;
	}

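	/* The data spans two pages, so two request slots will be consumed. */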
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref1;
	req->id = id1;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = gref2;
	req->id = id2;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id1);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(rsp->flags & NETRXF_more_data);
	XNB_ASSERT(rsp->status == PAGE_SIZE);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(rsp->id == id2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
	XNB_ASSERT(rsp->status == size - PAGE_SIZE);

	safe_m_freem(&mbuf);
}

/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
static void
xnb_rxpkt2rsp_2short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_reqs, nr_entries;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	uint16_t id = 47;
	uint16_t gref = 54;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbufc;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc->m_next != NULL);
	mbufc->m_pkthdr.len = size1 + size2;
	mbufc->m_len = size1;
	mbufc->m_next->m_len = size2;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);

	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;

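	/*
	 * Both mbufs are smaller than a page and copy into the same
	 * destination page, so two grant entries yield a single response.
	 */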
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_entries == 2);
	XNB_ASSERT(nr_reqs == 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == size1 + size2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
 */
static void
xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	int id = 7;
	int gref = 42;
	uint16_t canary = 6859;
	size_t size = 7 * MCLBYTES;
	int free_slots = 9;
	RING_IDX start = 2;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
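	/* Plant canary values in the next request slot to catch overruns. */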
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = canary;
	req->id = canary;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* Inject a copy error into the third grant entry */
	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	XNB_ASSERT(req->gref == canary);
	XNB_ASSERT(req->id == canary);

	safe_m_freem(&mbuf);
}

#if defined(INET) || defined(INET6)
/**
 * xnb_add_mbuf_cksum on an ARP request packet
 */
static void
xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
{
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ether_arp);
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ether_arp *ep;
	unsigned char pkt_orig[pkt_len];

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ARP request */
	eh = mtod(mbufc, struct ether_header*);
	eh->ether_dhost[0] = 0xff;
	eh->ether_dhost[1] = 0xff;
	eh->ether_dhost[2] = 0xff;
	eh->ether_dhost[3] = 0xff;
	eh->ether_dhost[4] = 0xff;
	eh->ether_dhost[5] = 0xff;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x15;
	eh->ether_shost[2] = 0x17;
	eh->ether_shost[3] = 0xe9;
	eh->ether_shost[4] = 0x30;
	eh->ether_shost[5] = 0x68;
	eh->ether_type = htons(ETHERTYPE_ARP);
	ep = (struct ether_arp*)(eh + 1);
	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	ep->ea_hdr.ar_hln = 6;
	ep->ea_hdr.ar_pln = 4;
	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
	ep->arp_sha[0] = 0x00;
	ep->arp_sha[1] = 0x15;
	ep->arp_sha[2] = 0x17;
	ep->arp_sha[3] = 0xe9;
	ep->arp_sha[4] = 0x30;
	ep->arp_sha[5] = 0x68;
	ep->arp_spa[0] = 0xc0;
	ep->arp_spa[1] = 0xa8;
	ep->arp_spa[2] = 0x0a;
	ep->arp_spa[3] = 0x04;
	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
	ep->arp_tpa[0] = 0xc0;
	ep->arp_tpa[1] = 0xa8;
	ep->arp_tpa[2] = 0x0a;
	ep->arp_tpa[3] = 0x06;

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the packet */
	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);

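	/* ARP is not IP, so the checksum code must leave the frame alone. */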
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Verify that the packet's data did not change */
	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
	m_freem(mbufc);
}

/**
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough
 */
static void
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
{
	struct ether_header *eh;
	struct ip *iph;

	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;		/* IP v4 */
	iph->ip_tos = 0;
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_ttl = 64;
	iph->ip_p = ip_p;
	iph->ip_sum = htons(ip_sum);
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);
}

/**
 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
 * ICMP packet
 */
static void
xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
{
	const size_t icmp_len = 64;	/* set by ping(1) */
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + icmp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct icmp *icmph;
	unsigned char pkt_orig[icmp_len];
	uint32_t *tv_field;
	uint8_t *data_payload;
	int i;
	const uint16_t ICMP_CSUM = 0xaed7;
	const uint16_t IP_CSUM = 0xe533;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ICMP ping request */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
	iph = (struct ip*)(eh + 1);
	icmph = (struct icmp*)(iph + 1);
	icmph->icmp_type = ICMP_ECHO;
	icmph->icmp_code = 0;
	icmph->icmp_cksum = htons(ICMP_CSUM);
	icmph->icmp_id = htons(31492);
	icmph->icmp_seq = htons(0);
	/*
	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we will set the bytes individually for portability.
	 */
	tv_field = (uint32_t*)(&(icmph->icmp_hun));
	tv_field[0] = 0x4f02cfac;
	tv_field[1] = 0x0007c46a;
	/*
	 * The remainder of the packet is an incrementing 8-bit integer,
	 * starting with 8
	 */
	data_payload = (uint8_t*)(&tv_field[2]);
	for (i = 8; i < 37; i++) {
		*data_payload++ = i;
	}

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	bcopy(icmph, pkt_orig, icmp_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the IP checksum */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));

	/* Check that the ICMP packet did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
	m_freem(mbufc);
}

/**
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
 * UDP packet
 */
static void
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
{
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + udp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct udphdr *udp;
	uint8_t *data_payload;
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad);	/* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));

	m_freem(mbufc);
}

/**
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
 * large enough
 */
static void
xnb_fill_tcp(struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	uint32_t *options;
	uint8_t *data_payload;

	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	tcp->th_x2 = 0;
	tcp->th_off = 8;
	tcp->th_flags = 0x18;
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
}

/**
 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
 * packet
 */
static void
xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	const uint16_t IP_CSUM = 0xa55a;
	const uint16_t TCP_CSUM = 0x2f64;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}

/**
 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
 */
static void
xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	/*
	 * Use deliberately bad checksums, and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum
	 */
	const uint16_t IP_CSUM = 0xdead;
	const uint16_t TCP_CSUM = 0xbeef;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	iph->ip_sum = htons(IP_CSUM);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sum = htons(TCP_CSUM);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront does not use hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = 0;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check that the checksums didn't change */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}
#endif /* INET || INET6 */

/**
 * sscanf on unsigned chars
 */
static void
xnb_sscanf_hhu(char *buffer, size_t buflen)
{
	const char mystr[] = "137";
	uint8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

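	/* Only dest[4] should be written; the guard bytes must survive. */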
	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
}

/**
 * sscanf on signed chars
 */
static void
xnb_sscanf_hhd(char *buffer, size_t buflen)
{
	const char mystr[] = "-27";
	int8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
}

/**
 * sscanf on signed long longs
 */
static void
xnb_sscanf_lld(char *buffer, size_t buflen)
{
	const char mystr[] = "-123456789012345";	/* about -2**47 */
	long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    -123456789012345));
}

/**
 * sscanf on unsigned long longs
 */
static void
xnb_sscanf_llu(char *buffer, size_t buflen)
{
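	/* This value exceeds LLONG_MAX; it only fits when parsed unsigned. */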
	const char mystr[] = "12802747070103273189";
	unsigned long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = 0xdeadbeefdeadbeefull;

	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? 0xdeadbeefdeadbeefull :
		    12802747070103273189ull));
}

/**
 * sscanf with the %hhn conversion, which stores a character count into an
 * unsigned char
 */
static void
xnb_sscanf_hhn(char *buffer, size_t buflen)
{
	const char mystr[] =
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
	unsigned char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = (unsigned char)'X';

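	/*
	 * The format string consumes 160 characters before %hhn, so 160
	 * should be stored in dest[4].  %hhn assigns no input item, so
	 * sscanf should return 0.
	 */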
	XNB_ASSERT(sscanf(mystr,
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
}
