/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 Vincenzo Maffione
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * This program contains a suite of unit tests for the netmap control device.
 *
 * On FreeBSD, you can run these tests with Kyua once installed in the system:
 * # kyua test -k /usr/tests/sys/netmap/Kyuafile
 *
 * On Linux, you can run them directly:
 * # ./ctrl-api-test
 */

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <net/if.h>
#include <net/netmap.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <signal.h>

#ifdef __FreeBSD__
#include "freebsd_test_suite/macros.h"

static int
eventfd(int x __unused, int y __unused)
{
	errno = ENODEV;
	return -1;
}
#else /* __linux__ */
#include <sys/eventfd.h>
#endif

static int
exec_command(int argc, const char *const argv[])
{
	pid_t child_pid;
	pid_t wret;
	int child_status;
	int i;

	printf("Executing command: ");
	for (i = 0; i < argc - 1; i++) {
		if (!argv[i]) {
			/* Invalid argument. */
			return -1;
		}
		if (i > 0) {
			putchar(' ');
		}
		printf("%s", argv[i]);
	}
	putchar('\n');

	child_pid = fork();
	if (child_pid == 0) {
		char **av;
		int fds[3];

		/* Child process. Redirect stdin, stdout
		 * and stderr. */
		for (i = 0; i < 3; i++) {
			close(i);
			fds[i] = open("/dev/null", O_RDONLY);
			if (fds[i] < 0) {
				for (i--; i >= 0; i--) {
					close(fds[i]);
				}
				/* We are in the child process: exit rather
				 * than returning into the caller's code. */
				exit(EXIT_FAILURE);
			}
		}

		/* Make a copy of the arguments, passing them to execvp. */
		av = calloc(argc, sizeof(av[0]));
		if (!av) {
			exit(EXIT_FAILURE);
		}
		for (i = 0; i < argc - 1; i++) {
			av[i] = strdup(argv[i]);
			if (!av[i]) {
				exit(EXIT_FAILURE);
			}
		}
		execvp(av[0], av);
		perror("execvp()");
		exit(EXIT_FAILURE);
	}

	wret = waitpid(child_pid, &child_status, 0);
	if (wret < 0) {
		fprintf(stderr, "waitpid() failed: %s\n", strerror(errno));
		return wret;
	}
	if (WIFEXITED(child_status)) {
		return WEXITSTATUS(child_status);
	}

	return -1;
}

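/*
 * Values returned through pthread_exit() by the threads that run the
 * sync kloop (see sync_kloop_worker()).
 */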
#define THRET_SUCCESS	((void *)128)
#define THRET_FAILURE	((void *)0)

struct TestContext {
	char ifname[64];
	char ifname_ext[128];
	char bdgname[64];
	uint32_t nr_tx_slots;   /* slots in tx rings */
	uint32_t nr_rx_slots;   /* slots in rx rings */
	uint16_t nr_tx_rings;   /* number of tx rings */
	uint16_t nr_rx_rings;   /* number of rx rings */
	uint16_t nr_host_tx_rings; /* number of host tx rings */
	uint16_t nr_host_rx_rings; /* number of host rx rings */
	uint16_t nr_mem_id;     /* id of the memory allocator */
	uint16_t nr_ringid;     /* ring(s) we care about */
	uint32_t nr_mode;       /* specify NR_REG_* modes */
	uint32_t nr_extra_bufs; /* number of requested extra buffers */
	uint64_t nr_flags;      /* additional flags (see below) */
	uint32_t nr_hdr_len;    /* for PORT_HDR_SET and PORT_HDR_GET */
	uint32_t nr_first_cpu_id;     /* vale polling */
	uint32_t nr_num_polling_cpus; /* vale polling */
	uint32_t sync_kloop_mode; /* sync-kloop */
	int fd; /* netmap file descriptor */

	void *csb;                   /* CSB entries (atok and ktoa) */
	struct nmreq_option *nr_opt; /* list of options */
	sem_t *sem;                  /* for thread synchronization */
};

static struct TestContext ctx_;

typedef int (*testfunc_t)(struct TestContext *ctx);

static void
nmreq_hdr_init(struct nmreq_header *hdr, const char *ifname)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->nr_version = NETMAP_API;
	strncpy(hdr->nr_name, ifname, sizeof(hdr->nr_name) - 1);
}

/* Single NETMAP_REQ_PORT_INFO_GET. */
static int
port_info_get(struct TestContext *ctx)
{
	struct nmreq_port_info_get req;
	struct nmreq_header hdr;
	int success;
	int ret;

	printf("Testing NETMAP_REQ_PORT_INFO_GET on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.nr_mem_id = ctx->nr_mem_id;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, PORT_INFO_GET)");
		return ret;
	}
	printf("nr_memsize %llu\n", (unsigned long long)req.nr_memsize);
	printf("nr_tx_slots %u\n", req.nr_tx_slots);
	printf("nr_rx_slots %u\n", req.nr_rx_slots);
	printf("nr_tx_rings %u\n", req.nr_tx_rings);
	printf("nr_rx_rings %u\n", req.nr_rx_rings);
	printf("nr_mem_id %u\n", req.nr_mem_id);

	success = req.nr_memsize && req.nr_tx_slots && req.nr_rx_slots &&
	          req.nr_tx_rings && req.nr_rx_rings;
	if (!success) {
		return -1;
	}

	/* Write back results to the context structure. */
	ctx->nr_tx_slots = req.nr_tx_slots;
	ctx->nr_rx_slots = req.nr_rx_slots;
	ctx->nr_tx_rings = req.nr_tx_rings;
	ctx->nr_rx_rings = req.nr_rx_rings;
	ctx->nr_mem_id = req.nr_mem_id;

	return 0;
}

/* Single NETMAP_REQ_REGISTER, with no subsequent use of the registered port. */
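/*
 * Note: a zero in any of the geometry fields of the request asks the kernel
 * to use its current default, which is then reported back in the same field
 * (this is what the success checks below rely on).
 */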
static int
port_register(struct TestContext *ctx)
{
	struct nmreq_register req;
	struct nmreq_header hdr;
	int success;
	int ret;

	printf("Testing NETMAP_REQ_REGISTER(mode=%d,ringid=%d,"
	       "flags=0x%llx) on '%s'\n",
	       ctx->nr_mode, ctx->nr_ringid, (unsigned long long)ctx->nr_flags,
	       ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
	hdr.nr_body = (uintptr_t)&req;
	hdr.nr_options = (uintptr_t)ctx->nr_opt;
	memset(&req, 0, sizeof(req));
	req.nr_mem_id = ctx->nr_mem_id;
	req.nr_mode = ctx->nr_mode;
	req.nr_ringid = ctx->nr_ringid;
	req.nr_flags = ctx->nr_flags;
	req.nr_tx_slots = ctx->nr_tx_slots;
	req.nr_rx_slots = ctx->nr_rx_slots;
	req.nr_tx_rings = ctx->nr_tx_rings;
	req.nr_host_tx_rings = ctx->nr_host_tx_rings;
	req.nr_host_rx_rings = ctx->nr_host_rx_rings;
	req.nr_rx_rings = ctx->nr_rx_rings;
	req.nr_extra_bufs = ctx->nr_extra_bufs;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, REGISTER)");
		return ret;
	}
	printf("nr_offset 0x%llx\n", (unsigned long long)req.nr_offset);
	printf("nr_memsize %llu\n", (unsigned long long)req.nr_memsize);
	printf("nr_tx_slots %u\n", req.nr_tx_slots);
	printf("nr_rx_slots %u\n", req.nr_rx_slots);
	printf("nr_tx_rings %u\n", req.nr_tx_rings);
	printf("nr_rx_rings %u\n", req.nr_rx_rings);
	printf("nr_host_tx_rings %u\n", req.nr_host_tx_rings);
	printf("nr_host_rx_rings %u\n", req.nr_host_rx_rings);
	printf("nr_mem_id %u\n", req.nr_mem_id);
	printf("nr_extra_bufs %u\n", req.nr_extra_bufs);

	success = req.nr_memsize && (ctx->nr_mode == req.nr_mode) &&
	          (ctx->nr_ringid == req.nr_ringid) &&
	          (ctx->nr_flags == req.nr_flags) &&
	          ((!ctx->nr_tx_slots && req.nr_tx_slots) ||
	           (ctx->nr_tx_slots == req.nr_tx_slots)) &&
	          ((!ctx->nr_rx_slots && req.nr_rx_slots) ||
	           (ctx->nr_rx_slots == req.nr_rx_slots)) &&
	          ((!ctx->nr_tx_rings && req.nr_tx_rings) ||
	           (ctx->nr_tx_rings == req.nr_tx_rings)) &&
	          ((!ctx->nr_rx_rings && req.nr_rx_rings) ||
	           (ctx->nr_rx_rings == req.nr_rx_rings)) &&
	          ((!ctx->nr_host_tx_rings && req.nr_host_tx_rings) ||
	           (ctx->nr_host_tx_rings == req.nr_host_tx_rings)) &&
	          ((!ctx->nr_host_rx_rings && req.nr_host_rx_rings) ||
	           (ctx->nr_host_rx_rings == req.nr_host_rx_rings)) &&
	          ((!ctx->nr_mem_id && req.nr_mem_id) ||
	           (ctx->nr_mem_id == req.nr_mem_id)) &&
	          (ctx->nr_extra_bufs == req.nr_extra_bufs);
	if (!success) {
		return -1;
	}

	/* Write back results to the context structure. */
	ctx->nr_tx_slots = req.nr_tx_slots;
	ctx->nr_rx_slots = req.nr_rx_slots;
	ctx->nr_tx_rings = req.nr_tx_rings;
	ctx->nr_rx_rings = req.nr_rx_rings;
	ctx->nr_host_tx_rings = req.nr_host_tx_rings;
	ctx->nr_host_rx_rings = req.nr_host_rx_rings;
	ctx->nr_mem_id = req.nr_mem_id;
	ctx->nr_extra_bufs = req.nr_extra_bufs;

	return 0;
}

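/*
 * Issue a legacy NIOCREGIF request, using the ABI version specified by
 * the caller (see NETMAP_API_NIOCREGIF below).
 */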
static int
niocregif(struct TestContext *ctx, int netmap_api)
{
	struct nmreq req;
	int success;
	int ret;

	printf("Testing legacy NIOCREGIF on '%s'\n", ctx->ifname_ext);

	memset(&req, 0, sizeof(req));
	memcpy(req.nr_name, ctx->ifname_ext, sizeof(req.nr_name));
	req.nr_name[sizeof(req.nr_name) - 1] = '\0';
	req.nr_version = netmap_api;
	req.nr_ringid = ctx->nr_ringid;
	req.nr_flags = ctx->nr_mode | ctx->nr_flags;
	req.nr_tx_slots = ctx->nr_tx_slots;
	req.nr_rx_slots = ctx->nr_rx_slots;
	req.nr_tx_rings = ctx->nr_tx_rings;
	req.nr_rx_rings = ctx->nr_rx_rings;
	req.nr_arg2 = ctx->nr_mem_id;
	req.nr_arg3 = ctx->nr_extra_bufs;

	ret = ioctl(ctx->fd, NIOCREGIF, &req);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCREGIF)");
		return ret;
	}

	printf("nr_offset 0x%x\n", req.nr_offset);
	printf("nr_memsize %u\n", req.nr_memsize);
	printf("nr_tx_slots %u\n", req.nr_tx_slots);
	printf("nr_rx_slots %u\n", req.nr_rx_slots);
	printf("nr_tx_rings %u\n", req.nr_tx_rings);
	printf("nr_rx_rings %u\n", req.nr_rx_rings);
	printf("nr_version %d\n", req.nr_version);
	printf("nr_ringid %x\n", req.nr_ringid);
	printf("nr_flags %x\n", req.nr_flags);
	printf("nr_arg2 %u\n", req.nr_arg2);
	printf("nr_arg3 %u\n", req.nr_arg3);

	success = req.nr_memsize &&
	          (ctx->nr_ringid == req.nr_ringid) &&
	          ((ctx->nr_mode | ctx->nr_flags) == req.nr_flags) &&
	          ((!ctx->nr_tx_slots && req.nr_tx_slots) ||
	           (ctx->nr_tx_slots == req.nr_tx_slots)) &&
	          ((!ctx->nr_rx_slots && req.nr_rx_slots) ||
	           (ctx->nr_rx_slots == req.nr_rx_slots)) &&
	          ((!ctx->nr_tx_rings && req.nr_tx_rings) ||
	           (ctx->nr_tx_rings == req.nr_tx_rings)) &&
	          ((!ctx->nr_rx_rings && req.nr_rx_rings) ||
	           (ctx->nr_rx_rings == req.nr_rx_rings)) &&
	          ((!ctx->nr_mem_id && req.nr_arg2) ||
	           (ctx->nr_mem_id == req.nr_arg2)) &&
	          (ctx->nr_extra_bufs == req.nr_arg3);
	if (!success) {
		return -1;
	}

	/* Write back results to the context structure. */
	ctx->nr_tx_slots = req.nr_tx_slots;
	ctx->nr_rx_slots = req.nr_rx_slots;
	ctx->nr_tx_rings = req.nr_tx_rings;
	ctx->nr_rx_rings = req.nr_rx_rings;
	ctx->nr_mem_id = req.nr_arg2;
	ctx->nr_extra_bufs = req.nr_arg3;

	return ret;
}

/* The 11 ABI is the one right before the introduction of the new NIOCCTRL
 * ABI. The 11 ABI is useful to perform tests with legacy applications
 * (which use the 11 ABI) and new kernel (which uses 12, or higher).
 * However, version 14 introduced a change in the layout of struct netmap_if,
 * so that binary backward compatibility to 11 is not supported anymore.
 */
#define NETMAP_API_NIOCREGIF	14

static int
legacy_regif_default(struct TestContext *ctx)
{
	return niocregif(ctx, NETMAP_API_NIOCREGIF);
}

static int
legacy_regif_all_nic(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	return niocregif(ctx, NETMAP_API);
}

static int
legacy_regif_12(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	return niocregif(ctx, NETMAP_API_NIOCREGIF+1);
}

static int
legacy_regif_sw(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_SW;
	return niocregif(ctx, NETMAP_API_NIOCREGIF);
}

static int
legacy_regif_future(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_NIC_SW;
	/* Test forward compatibility for the legacy ABI. This means
	 * using an older kernel (with ABI 12 or higher) and a newer
	 * application (with ABI greater than NETMAP_API). */
	return niocregif(ctx, NETMAP_API+2);
}

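/* Legacy registration that also asks for extra buffers (nr_arg3). */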
static int
legacy_regif_extra_bufs(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	ctx->nr_extra_bufs = 20; /* arbitrary number of extra bufs */
	return niocregif(ctx, NETMAP_API_NIOCREGIF);
}

static int
legacy_regif_extra_bufs_pipe(struct TestContext *ctx)
{
	strncat(ctx->ifname_ext, "{pipeexbuf", sizeof(ctx->ifname_ext));
	ctx->nr_mode = NR_REG_ALL_NIC;
	ctx->nr_extra_bufs = 58; /* arbitrary number of extra bufs */

	return niocregif(ctx, NETMAP_API_NIOCREGIF);
}

static int
legacy_regif_extra_bufs_pipe_vale(struct TestContext *ctx)
{
	strncpy(ctx->ifname_ext, "valeX1:Y4", sizeof(ctx->ifname_ext));
	return legacy_regif_extra_bufs_pipe(ctx);
}

/* Only valid after a successful port_register(). */
static int
num_registered_rings(struct TestContext *ctx)
{
	if (ctx->nr_flags & NR_TX_RINGS_ONLY) {
		return ctx->nr_tx_rings;
	}
	if (ctx->nr_flags & NR_RX_RINGS_ONLY) {
		return ctx->nr_rx_rings;
	}

	return ctx->nr_tx_rings + ctx->nr_rx_rings;
}

static int
port_register_hwall_host(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_NIC_SW;
	return port_register(ctx);
}

static int
port_register_hostall(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_SW;
	return port_register(ctx);
}

static int
port_register_hwall(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	return port_register(ctx);
}

static int
port_register_single_hw_pair(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ONE_NIC;
	ctx->nr_ringid = 0;
	return port_register(ctx);
}

static int
port_register_single_host_pair(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ONE_SW;
	ctx->nr_host_tx_rings = 2;
	ctx->nr_host_rx_rings = 2;
	ctx->nr_ringid = 1;
	return port_register(ctx);
}

static int
port_register_hostall_many(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_SW;
	ctx->nr_host_tx_rings = 5;
	ctx->nr_host_rx_rings = 4;
	return port_register(ctx);
}

static int
port_register_hwall_tx(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	ctx->nr_flags |= NR_TX_RINGS_ONLY;
	return port_register(ctx);
}

static int
port_register_hwall_rx(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_ALL_NIC;
	ctx->nr_flags |= NR_RX_RINGS_ONLY;
	return port_register(ctx);
}

/* NETMAP_REQ_VALE_ATTACH */
static int
vale_attach(struct TestContext *ctx)
{
	struct nmreq_vale_attach req;
	struct nmreq_header hdr;
	char vpname[sizeof(ctx->bdgname) + 1 + sizeof(ctx->ifname_ext)];
	int ret;

	snprintf(vpname, sizeof(vpname), "%s:%s", ctx->bdgname, ctx->ifname_ext);

	printf("Testing NETMAP_REQ_VALE_ATTACH on '%s'\n", vpname);
	nmreq_hdr_init(&hdr, vpname);
	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.reg.nr_mem_id = ctx->nr_mem_id;
	if (ctx->nr_mode == 0) {
		ctx->nr_mode = NR_REG_ALL_NIC; /* default */
	}
	req.reg.nr_mode = ctx->nr_mode;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_ATTACH)");
		return ret;
	}
	printf("nr_mem_id %u\n", req.reg.nr_mem_id);

	return ((!ctx->nr_mem_id && req.reg.nr_mem_id > 1) ||
	        (ctx->nr_mem_id == req.reg.nr_mem_id)) &&
	               (ctx->nr_flags == req.reg.nr_flags)
	               ? 0
	               : -1;
}

/* NETMAP_REQ_VALE_DETACH */
static int
vale_detach(struct TestContext *ctx)
{
	struct nmreq_header hdr;
	struct nmreq_vale_detach req;
	char vpname[256];
	int ret;

	snprintf(vpname, sizeof(vpname), "%s:%s", ctx->bdgname, ctx->ifname_ext);

	printf("Testing NETMAP_REQ_VALE_DETACH on '%s'\n", vpname);
	nmreq_hdr_init(&hdr, vpname);
	hdr.nr_reqtype = NETMAP_REQ_VALE_DETACH;
	hdr.nr_body = (uintptr_t)&req;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_DETACH)");
		return ret;
	}

	return 0;
}

/* First NETMAP_REQ_VALE_ATTACH, then NETMAP_REQ_VALE_DETACH. */
static int
vale_attach_detach(struct TestContext *ctx)
{
	int ret;

	if ((ret = vale_attach(ctx)) != 0) {
		return ret;
	}

	return vale_detach(ctx);
}

static int
vale_attach_detach_host_rings(struct TestContext *ctx)
{
	ctx->nr_mode = NR_REG_NIC_SW;
	return vale_attach_detach(ctx);
}

/* First NETMAP_REQ_PORT_HDR_SET and then NETMAP_REQ_PORT_HDR_GET,
 * to check that we get back the same value. */
static int
port_hdr_set_and_get(struct TestContext *ctx)
{
	struct nmreq_port_hdr req;
	struct nmreq_header hdr;
	int ret;

	printf("Testing NETMAP_REQ_PORT_HDR_SET on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.nr_hdr_len = ctx->nr_hdr_len;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, PORT_HDR_SET)");
		return ret;
	}

	if (req.nr_hdr_len != ctx->nr_hdr_len) {
		return -1;
	}

	printf("Testing NETMAP_REQ_PORT_HDR_GET on '%s'\n", ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
	req.nr_hdr_len = 0;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, PORT_HDR_GET)");
		return ret;
	}
	printf("nr_hdr_len %u\n", req.nr_hdr_len);

	return (req.nr_hdr_len == ctx->nr_hdr_len) ? 0 : -1;
}

/*
 * Possible lengths for the VirtIO network header, as specified by
 * the standard:
 * http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html
 */
#define VIRTIO_NET_HDR_LEN				10
#define VIRTIO_NET_HDR_LEN_WITH_MERGEABLE_RXBUFS	12

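/*
 * Check that the virtio-net header length of an ephemeral VALE port can be
 * set to each of the acceptable values and read back.
 */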
static int
vale_ephemeral_port_hdr_manipulation(struct TestContext *ctx)
{
	int ret;

	strncpy(ctx->ifname_ext, "vale:eph0", sizeof(ctx->ifname_ext));
	ctx->nr_mode = NR_REG_ALL_NIC;
	if ((ret = port_register(ctx))) {
		return ret;
	}
	/* Try to set and get all the acceptable values. */
	ctx->nr_hdr_len = VIRTIO_NET_HDR_LEN_WITH_MERGEABLE_RXBUFS;
	if ((ret = port_hdr_set_and_get(ctx))) {
		return ret;
	}
	ctx->nr_hdr_len = 0;
	if ((ret = port_hdr_set_and_get(ctx))) {
		return ret;
	}
	ctx->nr_hdr_len = VIRTIO_NET_HDR_LEN;
	if ((ret = port_hdr_set_and_get(ctx))) {
		return ret;
	}
	return 0;
}

static int
vale_persistent_port(struct TestContext *ctx)
{
	struct nmreq_vale_newif req;
	struct nmreq_header hdr;
	int result;
	int ret;

	strncpy(ctx->ifname_ext, "per4", sizeof(ctx->ifname_ext));

	printf("Testing NETMAP_REQ_VALE_NEWIF on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_VALE_NEWIF;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.nr_mem_id = ctx->nr_mem_id;
	req.nr_tx_slots = ctx->nr_tx_slots;
	req.nr_rx_slots = ctx->nr_rx_slots;
	req.nr_tx_rings = ctx->nr_tx_rings;
	req.nr_rx_rings = ctx->nr_rx_rings;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_NEWIF)");
		return ret;
	}

	/* Attach the persistent VALE port to a switch and then detach. */
	result = vale_attach_detach(ctx);

	printf("Testing NETMAP_REQ_VALE_DELIF on '%s'\n", ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_VALE_DELIF;
	hdr.nr_body = (uintptr_t)NULL;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_DELIF)");
		if (result == 0) {
			result = ret;
		}
	}

	return result;
}

/* Single NETMAP_REQ_POOLS_INFO_GET. */
static int
pools_info_get(struct TestContext *ctx)
{
	struct nmreq_pools_info req;
	struct nmreq_header hdr;
	int ret;

	printf("Testing NETMAP_REQ_POOLS_INFO_GET on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_POOLS_INFO_GET;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.nr_mem_id = ctx->nr_mem_id;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, POOLS_INFO_GET)");
		return ret;
	}
	printf("nr_memsize %llu\n", (unsigned long long)req.nr_memsize);
	printf("nr_mem_id %u\n", req.nr_mem_id);
	printf("nr_if_pool_offset 0x%llx\n",
	       (unsigned long long)req.nr_if_pool_offset);
	printf("nr_if_pool_objtotal %u\n", req.nr_if_pool_objtotal);
	printf("nr_if_pool_objsize %u\n", req.nr_if_pool_objsize);
	printf("nr_ring_pool_offset 0x%llx\n",
	       (unsigned long long)req.nr_ring_pool_offset);
	printf("nr_ring_pool_objtotal %u\n", req.nr_ring_pool_objtotal);
	printf("nr_ring_pool_objsize %u\n", req.nr_ring_pool_objsize);
	printf("nr_buf_pool_offset 0x%llx\n",
	       (unsigned long long)req.nr_buf_pool_offset);
	printf("nr_buf_pool_objtotal %u\n", req.nr_buf_pool_objtotal);
	printf("nr_buf_pool_objsize %u\n", req.nr_buf_pool_objsize);

	return req.nr_memsize && req.nr_if_pool_objtotal &&
	                       req.nr_if_pool_objsize &&
	                       req.nr_ring_pool_objtotal &&
	                       req.nr_ring_pool_objsize &&
	                       req.nr_buf_pool_objtotal &&
	                       req.nr_buf_pool_objsize
	               ? 0
	               : -1;
}

static int
pools_info_get_and_register(struct TestContext *ctx)
{
	int ret;

	/* Check that we can get pools info before we register
	 * a netmap interface. */
	ret = pools_info_get(ctx);
	if (ret != 0) {
		return ret;
	}

	ctx->nr_mode = NR_REG_ONE_NIC;
	ret = port_register(ctx);
	if (ret != 0) {
		return ret;
	}
	ctx->nr_mem_id = 1;

	/* Check that we can get pools info also after we register. */
	return pools_info_get(ctx);
}

static int
pools_info_get_empty_ifname(struct TestContext *ctx)
{
	strncpy(ctx->ifname_ext, "", sizeof(ctx->ifname_ext));
	return pools_info_get(ctx) != 0 ? 0 : -1;
}

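/*
 * Master side of an ephemeral netmap pipe: registration with NR_REG_NIC_SW
 * must be refused, while NR_REG_ALL_NIC must succeed.
 */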
static int
pipe_master(struct TestContext *ctx)
{
	strncat(ctx->ifname_ext, "{pipeid1", sizeof(ctx->ifname_ext));
	ctx->nr_mode = NR_REG_NIC_SW;

	if (port_register(ctx) == 0) {
		printf("pipes should not accept NR_REG_NIC_SW\n");
		return -1;
	}
	ctx->nr_mode = NR_REG_ALL_NIC;

	return port_register(ctx);
}

static int
pipe_slave(struct TestContext *ctx)
{
	strncat(ctx->ifname_ext, "}pipeid2", sizeof(ctx->ifname_ext));
	ctx->nr_mode = NR_REG_ALL_NIC;

	return port_register(ctx);
}

/* Test PORT_INFO_GET and POOLS_INFO_GET on a pipe. This is useful to test the
 * registration request used internally by netmap. */
static int
pipe_port_info_get(struct TestContext *ctx)
{
	strncat(ctx->ifname_ext, "}pipeid3", sizeof(ctx->ifname_ext));

	return port_info_get(ctx);
}

static int
pipe_pools_info_get(struct TestContext *ctx)
{
	strncat(ctx->ifname_ext, "{xid", sizeof(ctx->ifname_ext));

	return pools_info_get(ctx);
}

/* NETMAP_REQ_VALE_POLLING_ENABLE */
static int
vale_polling_enable(struct TestContext *ctx)
{
	struct nmreq_vale_polling req;
	struct nmreq_header hdr;
	char vpname[256];
	int ret;

	snprintf(vpname, sizeof(vpname), "%s:%s", ctx->bdgname, ctx->ifname_ext);
	printf("Testing NETMAP_REQ_VALE_POLLING_ENABLE on '%s'\n", vpname);

	nmreq_hdr_init(&hdr, vpname);
	hdr.nr_reqtype = NETMAP_REQ_VALE_POLLING_ENABLE;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	req.nr_mode = ctx->nr_mode;
	req.nr_first_cpu_id = ctx->nr_first_cpu_id;
	req.nr_num_polling_cpus = ctx->nr_num_polling_cpus;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_POLLING_ENABLE)");
		return ret;
	}

	return (req.nr_mode == ctx->nr_mode &&
	        req.nr_first_cpu_id == ctx->nr_first_cpu_id &&
	        req.nr_num_polling_cpus == ctx->nr_num_polling_cpus)
	               ? 0
	               : -1;
}

/* NETMAP_REQ_VALE_POLLING_DISABLE */
static int
vale_polling_disable(struct TestContext *ctx)
{
	struct nmreq_vale_polling req;
	struct nmreq_header hdr;
	char vpname[256];
	int ret;

	snprintf(vpname, sizeof(vpname), "%s:%s", ctx->bdgname, ctx->ifname_ext);
	printf("Testing NETMAP_REQ_VALE_POLLING_DISABLE on '%s'\n", vpname);

	nmreq_hdr_init(&hdr, vpname);
	hdr.nr_reqtype = NETMAP_REQ_VALE_POLLING_DISABLE;
	hdr.nr_body = (uintptr_t)&req;
	memset(&req, 0, sizeof(req));
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, VALE_POLLING_DISABLE)");
		return ret;
	}

	return 0;
}

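/* Attach a port to a VALE switch, enable kernel polling on it, then
 * disable polling and detach. */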
static int
vale_polling_enable_disable(struct TestContext *ctx)
{
	int ret = 0;

	if ((ret = vale_attach(ctx)) != 0) {
		return ret;
	}

	ctx->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
	ctx->nr_num_polling_cpus = 1;
	ctx->nr_first_cpu_id = 0;
	if ((ret = vale_polling_enable(ctx))) {
		vale_detach(ctx);
#ifdef __FreeBSD__
		/* NETMAP_REQ_VALE_POLLING_DISABLE is disabled on FreeBSD,
		 * because it is currently broken. We are happy to see that
		 * it fails. */
		return 0;
#else
		return ret;
#endif
	}

	if ((ret = vale_polling_disable(ctx))) {
		vale_detach(ctx);
		return ret;
	}

	return vale_detach(ctx);
}

static void
push_option(struct nmreq_option *opt, struct TestContext *ctx)
{
	opt->nro_next = (uintptr_t)ctx->nr_opt;
	ctx->nr_opt = opt;
}

static void
clear_options(struct TestContext *ctx)
{
	ctx->nr_opt = NULL;
}

static int
checkoption(struct nmreq_option *opt, struct nmreq_option *exp)
{
	if (opt->nro_next != exp->nro_next) {
		printf("nro_next %p expected %p\n",
		       (void *)(uintptr_t)opt->nro_next,
		       (void *)(uintptr_t)exp->nro_next);
		return -1;
	}
	if (opt->nro_reqtype != exp->nro_reqtype) {
		printf("nro_reqtype %u expected %u\n", opt->nro_reqtype,
		       exp->nro_reqtype);
		return -1;
	}
	if (opt->nro_status != exp->nro_status) {
		printf("nro_status %u expected %u\n", opt->nro_status,
		       exp->nro_status);
		return -1;
	}
	return 0;
}

static int
unsupported_option(struct TestContext *ctx)
{
	struct nmreq_option opt, save;

	printf("Testing unsupported option on %s\n", ctx->ifname_ext);

	memset(&opt, 0, sizeof(opt));
	opt.nro_reqtype = 1234;
	push_option(&opt, ctx);
	save = opt;

	if (port_register_hwall(ctx) >= 0)
		return -1;

	clear_options(ctx);
	save.nro_status = EOPNOTSUPP;
	return checkoption(&opt, &save);
}

static int
infinite_options(struct TestContext *ctx)
{
	struct nmreq_option opt;

	printf("Testing infinite list of options on %s\n", ctx->ifname_ext);

	memset(&opt, 0, sizeof(opt));
	opt.nro_reqtype = 1234;
	push_option(&opt, ctx);
	opt.nro_next = (uintptr_t)&opt;
	if (port_register_hwall(ctx) >= 0)
		return -1;
	clear_options(ctx);
	return (errno == EMSGSIZE ? 0 : -1);
}

#ifdef CONFIG_NETMAP_EXTMEM
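/*
 * Change the value of a netmap module parameter, saving the previous value
 * in *poldv. On Linux this goes through sysfs; on other systems it is a
 * no-op.
 */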
int
change_param(const char *pname, unsigned long newv, unsigned long *poldv)
{
#ifdef __linux__
	char param[256] = "/sys/module/netmap/parameters/";
	unsigned long oldv;
	FILE *f;

	strncat(param, pname, sizeof(param) - strlen(param) - 1);

	f = fopen(param, "r+");
	if (f == NULL) {
		perror(param);
		return -1;
	}
	if (fscanf(f, "%lu", &oldv) != 1) {
		perror(param);
		fclose(f);
		return -1;
	}
	if (poldv)
		*poldv = oldv;
	rewind(f);
	if (fprintf(f, "%lu\n", newv) < 0) {
		perror(param);
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("change_param: %s: %lu -> %lu\n", pname, oldv, newv);
#endif /* __linux__ */
	return 0;
}

static int
push_extmem_option(struct TestContext *ctx, const struct nmreq_pools_info *pi,
		struct nmreq_opt_extmem *e)
{
	void *addr;

	addr = mmap(NULL, pi->nr_memsize, PROT_READ | PROT_WRITE,
	            MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return -1;
	}

	memset(e, 0, sizeof(*e));
	e->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	e->nro_info = *pi;
	e->nro_usrptr = (uintptr_t)addr;

	push_option(&e->nro_opt, ctx);

	return 0;
}

static int
pop_extmem_option(struct TestContext *ctx, struct nmreq_opt_extmem *exp)
{
	struct nmreq_opt_extmem *e;
	int ret;

	e = (struct nmreq_opt_extmem *)(uintptr_t)ctx->nr_opt;
	ctx->nr_opt = (struct nmreq_option *)(uintptr_t)ctx->nr_opt->nro_next;

	if ((ret = checkoption(&e->nro_opt, &exp->nro_opt))) {
		return ret;
	}

	if (e->nro_usrptr != exp->nro_usrptr) {
		printf("usrptr %" PRIu64 " expected %" PRIu64 "\n",
		       e->nro_usrptr, exp->nro_usrptr);
		return -1;
	}
	if (e->nro_info.nr_memsize != exp->nro_info.nr_memsize) {
		printf("memsize %" PRIu64 " expected %" PRIu64 "\n",
		       e->nro_info.nr_memsize, exp->nro_info.nr_memsize);
		return -1;
	}

	if ((ret = munmap((void *)(uintptr_t)e->nro_usrptr,
	                  e->nro_info.nr_memsize)))
		return ret;

	return 0;
}

static int
_extmem_option(struct TestContext *ctx,
		const struct nmreq_pools_info *pi)
{
	struct nmreq_opt_extmem e, save;
	int ret;

	if ((ret = push_extmem_option(ctx, pi, &e)) < 0)
		return ret;

	save = e;

	strncpy(ctx->ifname_ext, "vale0:0", sizeof(ctx->ifname_ext));
	ctx->nr_tx_slots = 16;
	ctx->nr_rx_slots = 16;

	if ((ret = port_register_hwall(ctx)))
		return ret;

	ret = pop_extmem_option(ctx, &save);

	return ret;
}

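/* Minimum amount of memory needed to hold the three pools described by 'pi'. */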
static size_t
pools_info_min_memsize(const struct nmreq_pools_info *pi)
{
	size_t tot = 0;

	tot += pi->nr_if_pool_objtotal * pi->nr_if_pool_objsize;
	tot += pi->nr_ring_pool_objtotal * pi->nr_ring_pool_objsize;
	tot += pi->nr_buf_pool_objtotal * pi->nr_buf_pool_objsize;

	return tot;
}

/*
 * Fill the specification of a netmap memory allocator to be
 * used with the 'struct nmreq_opt_extmem' option. Arbitrary
 * values are used for the parameters, but with enough netmap
 * rings, netmap ifs, and buffers to support a VALE port.
 */
static void
pools_info_fill(struct nmreq_pools_info *pi)
{
	pi->nr_if_pool_objtotal = 2;
	pi->nr_if_pool_objsize = 1024;
	pi->nr_ring_pool_objtotal = 64;
	pi->nr_ring_pool_objsize = 512;
	pi->nr_buf_pool_objtotal = 4096;
	pi->nr_buf_pool_objsize = 2048;
	pi->nr_memsize = pools_info_min_memsize(pi);
}

static int
extmem_option(struct TestContext *ctx)
{
	struct nmreq_pools_info pools_info;

	pools_info_fill(&pools_info);

	printf("Testing extmem option on vale0:0\n");
	return _extmem_option(ctx, &pools_info);
}

static int
bad_extmem_option(struct TestContext *ctx)
{
	struct nmreq_pools_info pools_info;

	printf("Testing bad extmem option on vale0:0\n");

	pools_info_fill(&pools_info);
	/* Request a large ring size, to make sure that the kernel
	 * rejects our request. */
	pools_info.nr_ring_pool_objsize = (1 << 20);

	return _extmem_option(ctx, &pools_info) < 0 ? 0 : -1;
}

static int
duplicate_extmem_options(struct TestContext *ctx)
{
	struct nmreq_opt_extmem e1, save1, e2, save2;
	struct nmreq_pools_info pools_info;
	int ret;

	printf("Testing duplicate extmem option on vale0:0\n");

	pools_info_fill(&pools_info);

	if ((ret = push_extmem_option(ctx, &pools_info, &e1)) < 0)
		return ret;

	if ((ret = push_extmem_option(ctx, &pools_info, &e2)) < 0) {
		clear_options(ctx);
		return ret;
	}

	save1 = e1;
	save2 = e2;

	strncpy(ctx->ifname_ext, "vale0:0", sizeof(ctx->ifname_ext));
	ctx->nr_tx_slots = 16;
	ctx->nr_rx_slots = 16;

	ret = port_register_hwall(ctx);
	if (ret >= 0) {
		printf("duplicate option not detected\n");
		return -1;
	}

	save2.nro_opt.nro_status = EINVAL;
	if ((ret = pop_extmem_option(ctx, &save2)))
		return ret;

	save1.nro_opt.nro_status = EINVAL;
	if ((ret = pop_extmem_option(ctx, &save1)))
		return ret;

	return 0;
}
#endif /* CONFIG_NETMAP_EXTMEM */

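/*
 * Allocate a CSB (one atok entry and one ktoa entry per registered ring)
 * and attach it to the next request through a NETMAP_REQ_OPT_CSB option.
 */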
static int
push_csb_option(struct TestContext *ctx, struct nmreq_opt_csb *opt)
{
	size_t csb_size;
	int num_entries;
	int ret;

	ctx->nr_flags |= NR_EXCLUSIVE;

	/* Get port info in order to use num_registered_rings(). */
	ret = port_info_get(ctx);
	if (ret != 0) {
		return ret;
	}
	num_entries = num_registered_rings(ctx);

	csb_size = (sizeof(struct nm_csb_atok) + sizeof(struct nm_csb_ktoa)) *
	           num_entries;
	assert(csb_size > 0);
	if (ctx->csb) {
		free(ctx->csb);
	}
	ret = posix_memalign(&ctx->csb, sizeof(struct nm_csb_atok), csb_size);
	if (ret != 0) {
		printf("Failed to allocate CSB memory\n");
		exit(EXIT_FAILURE);
	}

	memset(opt, 0, sizeof(*opt));
	opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
	opt->csb_atok = (uintptr_t)ctx->csb;
	opt->csb_ktoa = (uintptr_t)(((uint8_t *)ctx->csb) +
	                            sizeof(struct nm_csb_atok) * num_entries);

	printf("Pushing option NETMAP_REQ_OPT_CSB\n");
	push_option(&opt->nro_opt, ctx);

	return 0;
}

static int
csb_mode(struct TestContext *ctx)
{
	struct nmreq_opt_csb opt;
	int ret;

	ret = push_csb_option(ctx, &opt);
	if (ret != 0) {
		return ret;
	}

	ret = port_register_hwall(ctx);
	clear_options(ctx);

	return ret;
}

static int
csb_mode_invalid_memory(struct TestContext *ctx)
{
	struct nmreq_opt_csb opt;
	int ret;

	memset(&opt, 0, sizeof(opt));
	opt.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
	opt.csb_atok = (uintptr_t)0x10;
	opt.csb_ktoa = (uintptr_t)0x800;
	push_option(&opt.nro_opt, ctx);

	ctx->nr_flags = NR_EXCLUSIVE;
	ret = port_register_hwall(ctx);
	clear_options(ctx);

	return (ret < 0) ? 0 : -1;
}

static int
sync_kloop_stop(struct TestContext *ctx)
{
	struct nmreq_header hdr;
	int ret;

	printf("Testing NETMAP_REQ_SYNC_KLOOP_STOP on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_SYNC_KLOOP_STOP;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, SYNC_KLOOP_STOP)");
	}

	return ret;
}

static void *
sync_kloop_worker(void *opaque)
{
	struct TestContext *ctx = opaque;
	struct nmreq_sync_kloop_start req;
	struct nmreq_header hdr;
	int ret;

	printf("Testing NETMAP_REQ_SYNC_KLOOP_START on '%s'\n", ctx->ifname_ext);

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_SYNC_KLOOP_START;
	hdr.nr_body = (uintptr_t)&req;
	hdr.nr_options = (uintptr_t)ctx->nr_opt;
	memset(&req, 0, sizeof(req));
	req.sleep_us = 500;
	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, SYNC_KLOOP_START)");
	}

	if (ctx->sem) {
		sem_post(ctx->sem);
	}

	pthread_exit(ret ? (void *)THRET_FAILURE : (void *)THRET_SUCCESS);
}

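/*
 * Start the sync kloop in a separate thread, stop it from the main thread
 * and collect the worker exit status.
 */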
static int
sync_kloop_start_stop(struct TestContext *ctx)
{
	pthread_t th;
	void *thret = THRET_FAILURE;
	int ret;

	ret = pthread_create(&th, NULL, sync_kloop_worker, ctx);
	if (ret != 0) {
		printf("pthread_create(kloop): %s\n", strerror(ret));
		return -1;
	}

	ret = sync_kloop_stop(ctx);
	if (ret != 0) {
		return ret;
	}

	ret = pthread_join(th, &thret);
	if (ret != 0) {
		printf("pthread_join(kloop): %s\n", strerror(ret));
	}

	return thret == THRET_SUCCESS ? 0 : -1;
}

static int
sync_kloop(struct TestContext *ctx)
{
	int ret;

	ret = csb_mode(ctx);
	if (ret != 0) {
		return ret;
	}

	return sync_kloop_start_stop(ctx);
}

static int
sync_kloop_eventfds(struct TestContext *ctx)
{
	struct nmreq_opt_sync_kloop_eventfds *evopt = NULL;
	struct nmreq_opt_sync_kloop_mode modeopt;
	struct nmreq_option evsave;
	int num_entries;
	size_t opt_size;
	int ret, i;

	memset(&modeopt, 0, sizeof(modeopt));
	modeopt.nro_opt.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_MODE;
	modeopt.mode = ctx->sync_kloop_mode;
	push_option(&modeopt.nro_opt, ctx);

	num_entries = num_registered_rings(ctx);
	opt_size = sizeof(*evopt) + num_entries * sizeof(evopt->eventfds[0]);
	evopt = calloc(1, opt_size);
	evopt->nro_opt.nro_next = 0;
	evopt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS;
	evopt->nro_opt.nro_status = 0;
	evopt->nro_opt.nro_size = opt_size;
	for (i = 0; i < num_entries; i++) {
		int efd = eventfd(0, 0);

		evopt->eventfds[i].ioeventfd = efd;
		efd = eventfd(0, 0);
		evopt->eventfds[i].irqfd = efd;
	}

	push_option(&evopt->nro_opt, ctx);
	evsave = evopt->nro_opt;

	ret = sync_kloop_start_stop(ctx);
	if (ret != 0) {
		free(evopt);
		clear_options(ctx);
		return ret;
	}
#ifdef __linux__
	evsave.nro_status = 0;
#else  /* !__linux__ */
	evsave.nro_status = EOPNOTSUPP;
#endif /* !__linux__ */

	ret = checkoption(&evopt->nro_opt, &evsave);
	free(evopt);
	clear_options(ctx);

	return ret;
}

static int
sync_kloop_eventfds_all_mode(struct TestContext *ctx,
			     uint32_t sync_kloop_mode)
{
	int ret;

	ret = csb_mode(ctx);
	if (ret != 0) {
		return ret;
	}

	ctx->sync_kloop_mode = sync_kloop_mode;

	return sync_kloop_eventfds(ctx);
}

static int
sync_kloop_eventfds_all(struct TestContext *ctx)
{
	return sync_kloop_eventfds_all_mode(ctx, 0);
}

static int
sync_kloop_eventfds_all_tx(struct TestContext *ctx)
{
	struct nmreq_opt_csb opt;
	int ret;

	ret = push_csb_option(ctx, &opt);
	if (ret != 0) {
		return ret;
	}

	ret = port_register_hwall_tx(ctx);
	if (ret != 0) {
		return ret;
	}
	clear_options(ctx);

	return sync_kloop_eventfds(ctx);
}

static int
sync_kloop_eventfds_all_direct(struct TestContext *ctx)
{
	return sync_kloop_eventfds_all_mode(ctx,
	    NM_OPT_SYNC_KLOOP_DIRECT_TX | NM_OPT_SYNC_KLOOP_DIRECT_RX);
}

static int
sync_kloop_eventfds_all_direct_tx(struct TestContext *ctx)
{
	return sync_kloop_eventfds_all_mode(ctx,
	    NM_OPT_SYNC_KLOOP_DIRECT_TX);
}

static int
sync_kloop_eventfds_all_direct_rx(struct TestContext *ctx)
{
	return sync_kloop_eventfds_all_mode(ctx,
	    NM_OPT_SYNC_KLOOP_DIRECT_RX);
}

static int
sync_kloop_nocsb(struct TestContext *ctx)
{
	int ret;

	ret = port_register_hwall(ctx);
	if (ret != 0) {
		return ret;
	}

	/* Sync kloop must fail because we did not use
	 * NETMAP_REQ_CSB_ENABLE. */
	return sync_kloop_start_stop(ctx) != 0 ? 0 : -1;
}

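/*
 * Enable CSB mode on an already registered port, by issuing a
 * NETMAP_REQ_CSB_ENABLE request that carries a NETMAP_REQ_OPT_CSB option.
 */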
static int
csb_enable(struct TestContext *ctx)
{
	struct nmreq_option saveopt;
	struct nmreq_opt_csb opt;
	struct nmreq_header hdr;
	int ret;

	ret = push_csb_option(ctx, &opt);
	if (ret != 0) {
		return ret;
	}
	saveopt = opt.nro_opt;
	saveopt.nro_status = 0;

	nmreq_hdr_init(&hdr, ctx->ifname_ext);
	hdr.nr_reqtype = NETMAP_REQ_CSB_ENABLE;
	hdr.nr_options = (uintptr_t)ctx->nr_opt;
	hdr.nr_body = (uintptr_t)NULL;

	printf("Testing NETMAP_REQ_CSB_ENABLE on '%s'\n", ctx->ifname_ext);

	ret = ioctl(ctx->fd, NIOCCTRL, &hdr);
	if (ret != 0) {
		perror("ioctl(/dev/netmap, NIOCCTRL, CSB_ENABLE)");
		return ret;
	}

	ret = checkoption(&opt.nro_opt, &saveopt);
	clear_options(ctx);

	return ret;
}

static int
sync_kloop_csb_enable(struct TestContext *ctx)
{
	int ret;

	ctx->nr_flags |= NR_EXCLUSIVE;
	ret = port_register_hwall(ctx);
	if (ret != 0) {
		return ret;
	}

	ret = csb_enable(ctx);
	if (ret != 0) {
		return ret;
	}

	return sync_kloop_start_stop(ctx);
}

static int
sync_kloop_conflict(struct TestContext *ctx)
{
	struct nmreq_opt_csb opt;
	pthread_t th1, th2;
	void *thret1 = THRET_FAILURE, *thret2 = THRET_FAILURE;
	struct timespec to;
	sem_t sem;
	int err = 0;
	int ret;

	ret = push_csb_option(ctx, &opt);
	if (ret != 0) {
		return ret;
	}

	ret = port_register_hwall(ctx);
	if (ret != 0) {
		return ret;
	}
	clear_options(ctx);

	ret = sem_init(&sem, 0, 0);
	if (ret != 0) {
		printf("sem_init() failed: %s\n", strerror(errno));
		return ret;
	}
	ctx->sem = &sem;

	ret = pthread_create(&th1, NULL, sync_kloop_worker, ctx);
	err |= ret;
	if (ret != 0) {
		printf("pthread_create(kloop1): %s\n", strerror(ret));
	}

	ret = pthread_create(&th2, NULL, sync_kloop_worker, ctx);
	err |= ret;
	if (ret != 0) {
		printf("pthread_create(kloop2): %s\n", strerror(ret));
	}

	/* Wait for one of the two threads to fail to start the kloop, to
	 * avoid a race condition where th1 starts the loop and stops,
	 * and after that th2 starts the loop successfully. */
	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 2;
	ret = sem_timedwait(&sem, &to);
	err |= ret;
	if (ret != 0) {
		printf("sem_timedwait() failed: %s\n", strerror(errno));
	}

	err |= sync_kloop_stop(ctx);

	ret = pthread_join(th1, &thret1);
	err |= ret;
	if (ret != 0) {
		printf("pthread_join(kloop1): %s\n", strerror(ret));
	}

	ret = pthread_join(th2, &thret2);
	err |= ret;
	if (ret != 0) {
		printf("pthread_join(kloop2): %s %d\n", strerror(ret), ret);
	}

	sem_destroy(&sem);
	ctx->sem = NULL;
	if (err) {
		return err;
	}

	/* Check that one of the two failed, while the other one succeeded. */
	return ((thret1 == THRET_SUCCESS && thret2 == THRET_FAILURE) ||
	        (thret1 == THRET_FAILURE && thret2 == THRET_SUCCESS))
	               ? 0
	               : -1;
}

static int
sync_kloop_eventfds_mismatch(struct TestContext *ctx)
{
	struct nmreq_opt_csb opt;
	int ret;

	ret = push_csb_option(ctx, &opt);
	if (ret != 0) {
		return ret;
	}

	ret = port_register_hwall_rx(ctx);
	if (ret != 0) {
		return ret;
	}
	clear_options(ctx);

	/* Deceive num_registered_rings() to trigger a failure of
	 * sync_kloop_eventfds(). The latter will think that all the
	 * rings were registered, and allocate the wrong number of
	 * eventfds. */
	ctx->nr_flags &= ~NR_RX_RINGS_ONLY;

	return (sync_kloop_eventfds(ctx) != 0) ? 0 : -1;
}

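/* Registration of a null port (NR_REG_NULL), which is not backed by any
 * real interface. */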
static int
null_port(struct TestContext *ctx)
{
	int ret;

	ctx->nr_mem_id = 1;
	ctx->nr_mode = NR_REG_NULL;
	ctx->nr_tx_rings = 10;
	ctx->nr_rx_rings = 5;
	ctx->nr_tx_slots = 256;
	ctx->nr_rx_slots = 100;
	ret = port_register(ctx);
	if (ret != 0) {
		return ret;
	}
	return 0;
}

static int
null_port_all_zero(struct TestContext *ctx)
{
	int ret;

	ctx->nr_mem_id = 1;
	ctx->nr_mode = NR_REG_NULL;
	ctx->nr_tx_rings = 0;
	ctx->nr_rx_rings = 0;
	ctx->nr_tx_slots = 0;
	ctx->nr_rx_slots = 0;
	ret = port_register(ctx);
	if (ret != 0) {
		return ret;
	}
	return 0;
}

static int
null_port_sync(struct TestContext *ctx)
{
	int ret;

	ctx->nr_mem_id = 1;
	ctx->nr_mode = NR_REG_NULL;
	ctx->nr_tx_rings = 10;
	ctx->nr_rx_rings = 5;
	ctx->nr_tx_slots = 256;
	ctx->nr_rx_slots = 100;
	ret = port_register(ctx);
	if (ret != 0) {
		return ret;
	}
	ret = ioctl(ctx->fd, NIOCTXSYNC, 0);
	if (ret != 0) {
		return ret;
	}
	return 0;
}

static void
usage(const char *prog)
{
	printf("%s -i IFNAME\n"
	       "[-j TEST_NUM1[-[TEST_NUM2]] | -[TEST_NUM2]]\n"
	       "[-l (list test cases)]\n",
	       prog);
}

struct mytest {
	testfunc_t test;
	const char *name;
};

#define decltest(f)                                                            \
	{                                                                      \
		.test = f, .name = #f                                          \
	}

static struct mytest tests[] = {
	decltest(port_info_get),
	decltest(port_register_hwall_host),
	decltest(port_register_hwall),
	decltest(port_register_hostall),
	decltest(port_register_single_hw_pair),
	decltest(port_register_single_host_pair),
	decltest(port_register_hostall_many),
	decltest(vale_attach_detach),
	decltest(vale_attach_detach_host_rings),
	decltest(vale_ephemeral_port_hdr_manipulation),
	decltest(vale_persistent_port),
	decltest(pools_info_get_and_register),
	decltest(pools_info_get_empty_ifname),
	decltest(pipe_master),
	decltest(pipe_slave),
	decltest(pipe_port_info_get),
	decltest(pipe_pools_info_get),
	decltest(vale_polling_enable_disable),
	decltest(unsupported_option),
	decltest(infinite_options),
#ifdef CONFIG_NETMAP_EXTMEM
	decltest(extmem_option),
	decltest(bad_extmem_option),
	decltest(duplicate_extmem_options),
#endif /* CONFIG_NETMAP_EXTMEM */
	decltest(csb_mode),
	decltest(csb_mode_invalid_memory),
	decltest(sync_kloop),
	decltest(sync_kloop_eventfds_all),
	decltest(sync_kloop_eventfds_all_tx),
	decltest(sync_kloop_eventfds_all_direct),
	decltest(sync_kloop_eventfds_all_direct_tx),
	decltest(sync_kloop_eventfds_all_direct_rx),
	decltest(sync_kloop_nocsb),
	decltest(sync_kloop_csb_enable),
	decltest(sync_kloop_conflict),
	decltest(sync_kloop_eventfds_mismatch),
	decltest(null_port),
	decltest(null_port_all_zero),
	decltest(null_port_sync),
	decltest(legacy_regif_default),
	decltest(legacy_regif_all_nic),
	decltest(legacy_regif_12),
	decltest(legacy_regif_sw),
	decltest(legacy_regif_future),
	decltest(legacy_regif_extra_bufs),
	decltest(legacy_regif_extra_bufs_pipe),
	decltest(legacy_regif_extra_bufs_pipe_vale),
};

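/* Close the control device and release the CSB memory used by the
 * current test. */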
static void
context_cleanup(struct TestContext *ctx)
{
	if (ctx->csb) {
		free(ctx->csb);
		ctx->csb = NULL;
	}

	close(ctx->fd);
	ctx->fd = -1;
}

static int
parse_interval(const char *arg, int *j, int *k)
{
	const char *scan = arg;
	char *rest;

	*j = 0;
	*k = -1;
	if (*scan == '-') {
		scan++;
		goto get_k;
	}
	if (!isdigit(*scan))
		goto err;
	*k = strtol(scan, &rest, 10);
	*j = *k - 1;
	scan = rest;
	if (*scan == '-') {
		*k = -1;
		scan++;
	}
get_k:
	if (*scan == '\0')
		return 0;
	if (!isdigit(*scan))
		goto err;
	*k = strtol(scan, &rest, 10);
	scan = rest;
	if (!(*scan == '\0'))
		goto err;

	return 0;

err:
	fprintf(stderr, "syntax error in '%s', must be num[-[num]] or -[num]\n",
	        arg);
	return -1;
}

#define ARGV_APPEND(_av, _ac, _x)\
	do {\
		assert((int)(_ac) < (int)(sizeof(_av)/sizeof((_av)[0])));\
		(_av)[(_ac)++] = _x;\
	} while (0)

static void
tap_cleanup(int signo)
{
	const char *av[8];
	int ac = 0;

	(void)signo;
#ifdef __FreeBSD__
	ARGV_APPEND(av, ac, "ifconfig");
	ARGV_APPEND(av, ac, ctx_.ifname);
	ARGV_APPEND(av, ac, "destroy");
#else
	ARGV_APPEND(av, ac, "ip");
	ARGV_APPEND(av, ac, "link");
	ARGV_APPEND(av, ac, "del");
	ARGV_APPEND(av, ac, ctx_.ifname);
#endif
	ARGV_APPEND(av, ac, NULL);
	if (exec_command(ac, av)) {
		printf("Failed to destroy tap interface\n");
	}
}

int
main(int argc, char **argv)
{
	int create_tap = 1;
	int num_tests;
	int ret = 0;
	int j = 0;
	int k = -1;
	int list = 0;
	int opt;
	int i;

#ifdef __FreeBSD__
	PLAIN_REQUIRE_KERNEL_MODULE("if_tap", 0);
	PLAIN_REQUIRE_KERNEL_MODULE("netmap", 0);
#endif

	memset(&ctx_, 0, sizeof(ctx_));

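	/* Generate pseudo-random names for the tap interface and the
	 * VALE switch used by the tests. */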
	{
		struct timespec t;
		int idx;

		clock_gettime(CLOCK_REALTIME, &t);
		srand((unsigned int)t.tv_nsec);
		idx = rand() % 8000 + 100;
		snprintf(ctx_.ifname, sizeof(ctx_.ifname), "tap%d", idx);
		idx = rand() % 800 + 100;
		snprintf(ctx_.bdgname, sizeof(ctx_.bdgname), "vale%d", idx);
	}

	while ((opt = getopt(argc, argv, "hi:j:l")) != -1) {
		switch (opt) {
		case 'h':
			usage(argv[0]);
			return 0;

		case 'i':
			strncpy(ctx_.ifname, optarg, sizeof(ctx_.ifname) - 1);
			create_tap = 0;
			break;

		case 'j':
			if (parse_interval(optarg, &j, &k) < 0) {
				usage(argv[0]);
				return -1;
			}
			break;

		case 'l':
			list = 1;
			create_tap = 0;
			break;

		default:
			printf("Unrecognized option %c\n", opt);
			usage(argv[0]);
			return -1;
		}
	}

	num_tests = sizeof(tests) / sizeof(tests[0]);

	if (j < 0 || j >= num_tests || k > num_tests) {
		fprintf(stderr, "Test interval %d-%d out of range (%d-%d)\n",
		        j + 1, k, 1, num_tests);
		return -1;
	}

	if (k < 0)
		k = num_tests;

	if (list) {
		printf("Available tests:\n");
		for (i = 0; i < num_tests; i++) {
			printf("#%03d: %s\n", i + 1, tests[i].name);
		}
		return 0;
	}

	if (create_tap) {
		struct sigaction sa;
		const char *av[8];
		int ac = 0;
#ifdef __FreeBSD__
		ARGV_APPEND(av, ac, "ifconfig");
		ARGV_APPEND(av, ac, ctx_.ifname);
		ARGV_APPEND(av, ac, "create");
		ARGV_APPEND(av, ac, "up");
#else
		ARGV_APPEND(av, ac, "ip");
		ARGV_APPEND(av, ac, "tuntap");
		ARGV_APPEND(av, ac, "add");
		ARGV_APPEND(av, ac, "mode");
		ARGV_APPEND(av, ac, "tap");
		ARGV_APPEND(av, ac, "name");
		ARGV_APPEND(av, ac, ctx_.ifname);
#endif
		ARGV_APPEND(av, ac, NULL);
		if (exec_command(ac, av)) {
			printf("Failed to create tap interface\n");
			return -1;
		}

		sa.sa_handler = tap_cleanup;
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_RESTART;
		ret = sigaction(SIGINT, &sa, NULL);
		if (ret) {
			perror("sigaction(SIGINT)");
			goto out;
		}
		ret = sigaction(SIGTERM, &sa, NULL);
		if (ret) {
			perror("sigaction(SIGTERM)");
			goto out;
		}
	}

	for (i = j; i < k; i++) {
		struct TestContext ctxcopy;
		int fd;
		printf("==> Start of Test #%d [%s]\n", i + 1, tests[i].name);
		fd = open("/dev/netmap", O_RDWR);
		if (fd < 0) {
			perror("open(/dev/netmap)");
			ret = fd;
			goto out;
		}
		memcpy(&ctxcopy, &ctx_, sizeof(ctxcopy));
		ctxcopy.fd = fd;
		memcpy(ctxcopy.ifname_ext, ctxcopy.ifname,
		       sizeof(ctxcopy.ifname));
		ret = tests[i].test(&ctxcopy);
		if (ret != 0) {
			printf("Test #%d [%s] failed\n", i + 1, tests[i].name);
			goto out;
		}
		printf("==> Test #%d [%s] successful\n", i + 1, tests[i].name);
		context_cleanup(&ctxcopy);
	}
out:
	tap_cleanup(0);

	return ret;
}