1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2014-2016 Giuseppe Lettieri 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

/* $FreeBSD$ */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/socket.h> /* sockaddrs */
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/refcount.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined(_WIN32)
#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#ifdef WITH_PIPES

/* hard upper bound on the number of slots in a pipe ring */
#define NM_PIPE_MAXSLOTS 4096

static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
SYSBEGIN(vars_pipes);
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW,
		&netmap_default_pipes, 0, "For compatibility only");
SYSEND;

/* allocate the pipe array in the parent adapter.
 *
 * Grow (never shrink) na->na_pipes so that it can hold at least
 * npipes entries. Returns 0 on success (including the no-op case
 * where the array is already large enough), EINVAL if npipes is
 * below the number of pipes currently attached or above NM_MAXPIPES,
 * ENOMEM on allocation failure.
 */
static int
nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
{
	size_t old_len, len;
	struct netmap_pipe_adapter **npa;

	if (npipes <= na->na_max_pipes)
		/* we already have more entries than requested */
		return 0;

	if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES)
		return EINVAL;

	old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
	len = sizeof(struct netmap_pipe_adapter *) * npipes;
	npa = nm_os_realloc(na->na_pipes, len, old_len);
	if (npa == NULL)
		return ENOMEM;

	na->na_pipes = npa;
	na->na_max_pipes = npipes;

	return 0;
}

/* deallocate the parent array in the parent adapter.
 * All pipe endpoints are expected to have detached already; a
 * non-zero na_next_pipe at this point indicates a refcount bug,
 * which we log but do not try to repair.
 */
void
netmap_pipe_dealloc(struct netmap_adapter *na)
{
	if (na->na_pipes) {
		if (na->na_next_pipe > 0) {
			D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name,
					na->na_next_pipe);
		}
		nm_os_free(na->na_pipes);
		na->na_pipes = NULL;
		na->na_max_pipes = 0;
		na->na_next_pipe = 0;
	}
}

/* find a pipe endpoint with the given id among the parent's pipes.
 * Linear scan over the attached endpoints; returns NULL if no
 * endpoint with that id exists.
 */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, u_int pipe_id)
{
	int i;
	struct netmap_pipe_adapter *na;

	for (i = 0; i < parent->na_next_pipe; i++) {
		na = parent->na_pipes[i];
		if (na->id == pipe_id) {
			return na;
		}
	}
	return NULL;
}

/* add a new pipe endpoint to the parent array, doubling the array
 * (starting from 2 entries) when it is full. The endpoint's index is
 * recorded in na->parent_slot so netmap_pipe_remove() can work in O(1).
 * Returns 0 or the error from nm_pipe_alloc().
 */
static int
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		u_int npipes = parent->na_max_pipes ?	2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
		if (error)
			return error;
	}

	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
	return 0;
}

/* remove the given pipe endpoint from the parent array.
 * The last entry is moved into the vacated slot, so the array stays
 * dense but the order of endpoints is not preserved.
 */
static void
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	u_int n;
	n = --parent->na_next_pipe;
	if (n != na->parent_slot) {
		/* move the last entry into the slot we are vacating */
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	}
	parent->na_pipes[n] = NULL;
}

/* Pipe txsync: transfer slots from the tx ring of this endpoint to
 * the rx ring of its peer by swapping the slot descriptors (zero
 * copy). The number of slots moved is bounded both by the new slots
 * released by the tx user (head - hwcur) and by the free space on the
 * peer's rx ring. Memory barriers order the slot writes against the
 * publication of the new ring pointers (paired with the mb() in
 * netmap_pipe_rxsync).
 */
int
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
{
	struct netmap_kring *rxkring = txkring->pipe;
	u_int limit; /* slots to transfer */
	u_int j, k, lim_tx = txkring->nkr_num_slots - 1,
		lim_rx = rxkring->nkr_num_slots - 1;
	int m, busy;
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(2, "before: hwcur %d hwtail %d cur %d head %d tail %d", txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail);

	j = rxkring->nr_hwtail; /* RX */
	k = txkring->nr_hwcur; /* TX */
	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
	if (m < 0)
		m += txkring->nkr_num_slots;
	limit = m;
	m = lim_rx; /* max avail space on destination */
	busy = j - rxkring->nr_hwcur; /* busy slots */
	if (busy < 0)
		busy += rxkring->nkr_num_slots;
	m -= busy; /* subtract busy slots */
	ND(2, "m %d limit %d", m, limit);
	if (m < limit)
		limit = m;

	if (limit == 0) {
		/* either the rxring is full, or nothing to send */
		return 0;
	}

	while (limit-- > 0) {
		struct netmap_slot *rs = &rxring->slot[j];
		struct netmap_slot *ts = &txring->slot[k];
		struct netmap_slot tmp;

		__builtin_prefetch(ts + 1);

		/* swap the slots and report the buffer change */
		tmp = *rs;
		tmp.flags |= NS_BUF_CHANGED;
		*rs = *ts;
		rs->flags |= NS_BUF_CHANGED;
		*ts = tmp;

		j = nm_next(j, lim_rx);
		k = nm_next(k, lim_tx);
	}

	mb(); /* make sure the slots are updated before publishing them */
	rxkring->nr_hwtail = j;
	txkring->nr_hwcur = k;
	txkring->nr_hwtail = nm_prev(k, lim_tx);

	ND(2, "after: hwcur %d hwtail %d cur %d head %d tail %d j %d", txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, j);

	mb(); /* make sure rxkring->nr_hwtail is updated before notifying */
	rxkring->nm_notify(rxkring, 0);

	return 0;
}

/* Pipe rxsync: the actual slot transfer is done by the peer's
 * txsync; here we only publish the slots that the rx user has
 * released (by advancing hwcur to head) and notify the tx side so it
 * can reuse them.
 */
int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
	struct netmap_kring *txkring = rxkring->pipe;
	uint32_t oldhwcur = rxkring->nr_hwcur;

	ND("%s %x <- %s", rxkring->name, flags, txkring->name);
	rxkring->nr_hwcur = rxkring->rhead; /* recover user-released slots */
	ND(5, "hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);
	mb(); /* paired with the first mb() in txsync */

	if (oldhwcur != rxkring->nr_hwcur) {
		/* we have released some slots, notify the other end */
		mb(); /* make sure nr_hwcur is updated before notifying */
		txkring->nm_notify(txkring, 0);
	}
	return 0;
}

/* Pipe endpoints are created and destroyed together, so that endpoints do not
 * have to check for the existence of their peer at each ?xsync.
 *
 * To play well with the existing netmap infrastructure (refcounts etc.), we
 * adopt the following strategy:
 *
 * 1) The first endpoint that is created also creates the other endpoint and
 * grabs a reference to it.
 *
 *    state A)  user1 --> endpoint1 --> endpoint2
 *
 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
 * its reference to the user:
 *
 *    state B)  user1 --> endpoint1     endpoint2 <--- user2
 *
 * 3) Assume that, starting from state B endpoint2 is closed. In the unregister
 * callback endpoint2 notes that endpoint1 is still active and adds a reference
 * from endpoint1 to itself. When user2 then releases her own reference,
 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
 * would be reached if endpoint1 were released instead.
 *
 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
 * it owns a reference to endpoint2 and releases it.
 *
 * Something similar goes on for the creation and destruction of the krings.
 */


/* netmap_pipe_krings_create.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1. We have to create both sets
 *    of krings.
 *
 * 2) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e2. e1 is certainly registered and our
 *    krings already exist. Nothing to do.
 *
 * (Whether we are e1 or e2 is told by pna->peer_ref: only the
 * endpoint that owns the reference to its peer creates the krings.)
 */
static int
netmap_pipe_krings_create(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int error = 0;
	enum txrx t;

	if (pna->peer_ref) {
		int i;

		/* case 1) above */
		ND("%p: case 1, create both ends", na);
		error = netmap_krings_create(na, 0);
		if (error)
			goto err;

		/* create the krings of the other end */
		error = netmap_krings_create(ona, 0);
		if (error)
			goto del_krings1;

		/* cross link the krings: the i-th tx kring of one end is
		 * paired with the i-th rx kring of the other end
		 */
		for_rx_tx(t) {
			enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				NMR(na, t)[i].pipe = NMR(ona, r) + i;
				NMR(ona, r)[i].pipe = NMR(na, t) + i;
			}
		}

	}
	return 0;

del_krings1:
	netmap_krings_delete(na);
err:
	return error;
}

/* netmap_pipe_reg.
 *
 * There are two cases on registration (onoff==1)
 *
 * 1.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Create the needed rings of the
 *      other end.
 *
 * 1.b) state is
 *
 *        usr1 --> e1 --> e2 <-- usr2
 *
 *      and we are e2. Drop the ref e1 is holding.
 *
 * There are two additional cases on unregister (onoff==0)
 *
 * 2.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Nothing special to do, e2 will
 *      be cleaned up by the destructor of e1.
 *
 * 2.b) state is
 *
 *        usr1 --> e1     e2 <-- usr2
 *
 *      and we are either e1 or e2. Add a ref from the
 *      other end and hide our rings.
384 */ 385 static int 386 netmap_pipe_reg(struct netmap_adapter *na, int onoff) 387 { 388 struct netmap_pipe_adapter *pna = 389 (struct netmap_pipe_adapter *)na; 390 struct netmap_adapter *ona = &pna->peer->up; 391 int i, error = 0; 392 enum txrx t; 393 394 ND("%p: onoff %d", na, onoff); 395 if (onoff) { 396 for_rx_tx(t) { 397 for (i = 0; i < nma_get_nrings(na, t); i++) { 398 struct netmap_kring *kring = &NMR(na, t)[i]; 399 400 if (nm_kring_pending_on(kring)) { 401 /* mark the peer ring as needed */ 402 kring->pipe->nr_kflags |= NKR_NEEDRING; 403 } 404 } 405 } 406 407 /* create all missing needed rings on the other end */ 408 error = netmap_mem_rings_create(ona); 409 if (error) 410 return error; 411 412 /* In case of no error we put our rings in netmap mode */ 413 for_rx_tx(t) { 414 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { 415 struct netmap_kring *kring = &NMR(na, t)[i]; 416 417 if (nm_kring_pending_on(kring)) { 418 kring->nr_mode = NKR_NETMAP_ON; 419 } 420 } 421 } 422 if (na->active_fds == 0) 423 na->na_flags |= NAF_NETMAP_ON; 424 } else { 425 if (na->active_fds == 0) 426 na->na_flags &= ~NAF_NETMAP_ON; 427 for_rx_tx(t) { 428 for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { 429 struct netmap_kring *kring = &NMR(na, t)[i]; 430 431 if (nm_kring_pending_off(kring)) { 432 kring->nr_mode = NKR_NETMAP_OFF; 433 /* mark the peer ring as no longer needed by us 434 * (it may still be kept if sombody else is using it) 435 */ 436 if (kring->pipe) { 437 kring->pipe->nr_kflags &= ~NKR_NEEDRING; 438 } 439 } 440 } 441 } 442 /* delete all the peer rings that are no longer needed */ 443 netmap_mem_rings_delete(ona); 444 } 445 446 if (na->active_fds) { 447 ND("active_fds %d", na->active_fds); 448 return 0; 449 } 450 451 if (pna->peer_ref) { 452 ND("%p: case 1.a or 2.a, nothing to do", na); 453 return 0; 454 } 455 if (onoff) { 456 ND("%p: case 1.b, drop peer", na); 457 pna->peer->peer_ref = 0; 458 netmap_adapter_put(na); 459 } else { 460 ND("%p: case 2.b, grab peer", na); 
461 netmap_adapter_get(na); 462 pna->peer->peer_ref = 1; 463 } 464 return error; 465 } 466 467 /* netmap_pipe_krings_delete. 468 * 469 * There are two cases: 470 * 471 * 1) state is 472 * 473 * usr1 --> e1 --> e2 474 * 475 * and we are e1 (e2 is not registered, so krings_delete cannot be 476 * called on it); 477 * 478 * 2) state is 479 * 480 * usr1 --> e1 e2 <-- usr2 481 * 482 * and we are either e1 or e2. 483 * 484 * In the former case we have to also delete the krings of e2; 485 * in the latter case we do nothing (note that our krings 486 * have already been hidden in the unregister callback). 487 */ 488 static void 489 netmap_pipe_krings_delete(struct netmap_adapter *na) 490 { 491 struct netmap_pipe_adapter *pna = 492 (struct netmap_pipe_adapter *)na; 493 struct netmap_adapter *ona; /* na of the other end */ 494 495 if (!pna->peer_ref) { 496 ND("%p: case 2, kept alive by peer", na); 497 return; 498 } 499 /* case 1) above */ 500 ND("%p: case 1, deleting everything", na); 501 netmap_krings_delete(na); /* also zeroes tx_rings etc. 
*/ 502 ona = &pna->peer->up; 503 if (ona->tx_rings == NULL) { 504 /* already deleted, we must be on an 505 * cleanup-after-error path */ 506 return; 507 } 508 netmap_krings_delete(ona); 509 } 510 511 512 static void 513 netmap_pipe_dtor(struct netmap_adapter *na) 514 { 515 struct netmap_pipe_adapter *pna = 516 (struct netmap_pipe_adapter *)na; 517 ND("%p %p", na, pna->parent_ifp); 518 if (pna->peer_ref) { 519 ND("%p: clean up peer", na); 520 pna->peer_ref = 0; 521 netmap_adapter_put(&pna->peer->up); 522 } 523 if (pna->role == NR_REG_PIPE_MASTER) 524 netmap_pipe_remove(pna->parent, pna); 525 if (pna->parent_ifp) 526 if_rele(pna->parent_ifp); 527 netmap_adapter_put(pna->parent); 528 pna->parent = NULL; 529 } 530 531 int 532 netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, 533 struct netmap_mem_d *nmd, int create) 534 { 535 struct nmreq pnmr; 536 struct netmap_adapter *pna; /* parent adapter */ 537 struct netmap_pipe_adapter *mna, *sna, *req; 538 struct ifnet *ifp = NULL; 539 u_int pipe_id; 540 int role = nmr->nr_flags & NR_REG_MASK; 541 int error, retries = 0; 542 543 ND("flags %x", nmr->nr_flags); 544 545 if (role != NR_REG_PIPE_MASTER && role != NR_REG_PIPE_SLAVE) { 546 ND("not a pipe"); 547 return 0; 548 } 549 role = nmr->nr_flags & NR_REG_MASK; 550 551 /* first, try to find the parent adapter */ 552 bzero(&pnmr, sizeof(pnmr)); 553 memcpy(&pnmr.nr_name, nmr->nr_name, IFNAMSIZ); 554 /* pass to parent the requested number of pipes */ 555 pnmr.nr_arg1 = nmr->nr_arg1; 556 for (;;) { 557 int create_error; 558 559 error = netmap_get_na(&pnmr, &pna, &ifp, nmd, create); 560 if (!error) 561 break; 562 if (error != ENXIO || retries++) { 563 ND("parent lookup failed: %d", error); 564 return error; 565 } 566 ND("try to create a persistent vale port"); 567 /* create a persistent vale port and try again */ 568 NMG_UNLOCK(); 569 create_error = netmap_vi_create(&pnmr, 1 /* autodelete */); 570 NMG_LOCK(); 571 if (create_error && create_error != EEXIST) { 572 if 
(create_error != EOPNOTSUPP) { 573 D("failed to create a persistent vale port: %d", create_error); 574 } 575 return error; 576 } 577 } 578 579 if (NETMAP_OWNED_BY_KERN(pna)) { 580 ND("parent busy"); 581 error = EBUSY; 582 goto put_out; 583 } 584 585 /* next, lookup the pipe id in the parent list */ 586 req = NULL; 587 pipe_id = nmr->nr_ringid & NETMAP_RING_MASK; 588 mna = netmap_pipe_find(pna, pipe_id); 589 if (mna) { 590 if (mna->role == role) { 591 ND("found %d directly at %d", pipe_id, mna->parent_slot); 592 req = mna; 593 } else { 594 ND("found %d indirectly at %d", pipe_id, mna->parent_slot); 595 req = mna->peer; 596 } 597 /* the pipe we have found already holds a ref to the parent, 598 * so we need to drop the one we got from netmap_get_na() 599 */ 600 netmap_unget_na(pna, ifp); 601 goto found; 602 } 603 ND("pipe %d not found, create %d", pipe_id, create); 604 if (!create) { 605 error = ENODEV; 606 goto put_out; 607 } 608 /* we create both master and slave. 609 * The endpoint we were asked for holds a reference to 610 * the other one. 
611 */ 612 mna = nm_os_malloc(sizeof(*mna)); 613 if (mna == NULL) { 614 error = ENOMEM; 615 goto put_out; 616 } 617 snprintf(mna->up.name, sizeof(mna->up.name), "%s{%d", pna->name, pipe_id); 618 619 mna->id = pipe_id; 620 mna->role = NR_REG_PIPE_MASTER; 621 mna->parent = pna; 622 mna->parent_ifp = ifp; 623 624 mna->up.nm_txsync = netmap_pipe_txsync; 625 mna->up.nm_rxsync = netmap_pipe_rxsync; 626 mna->up.nm_register = netmap_pipe_reg; 627 mna->up.nm_dtor = netmap_pipe_dtor; 628 mna->up.nm_krings_create = netmap_pipe_krings_create; 629 mna->up.nm_krings_delete = netmap_pipe_krings_delete; 630 mna->up.nm_mem = netmap_mem_get(pna->nm_mem); 631 mna->up.na_flags |= NAF_MEM_OWNER; 632 mna->up.na_lut = pna->na_lut; 633 634 mna->up.num_tx_rings = 1; 635 mna->up.num_rx_rings = 1; 636 mna->up.num_tx_desc = nmr->nr_tx_slots; 637 nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 638 1, NM_PIPE_MAXSLOTS, NULL); 639 mna->up.num_rx_desc = nmr->nr_rx_slots; 640 nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 641 1, NM_PIPE_MAXSLOTS, NULL); 642 error = netmap_attach_common(&mna->up); 643 if (error) 644 goto free_mna; 645 /* register the master with the parent */ 646 error = netmap_pipe_add(pna, mna); 647 if (error) 648 goto free_mna; 649 650 /* create the slave */ 651 sna = nm_os_malloc(sizeof(*mna)); 652 if (sna == NULL) { 653 error = ENOMEM; 654 goto unregister_mna; 655 } 656 /* most fields are the same, copy from master and then fix */ 657 *sna = *mna; 658 sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem); 659 snprintf(sna->up.name, sizeof(sna->up.name), "%s}%d", pna->name, pipe_id); 660 sna->role = NR_REG_PIPE_SLAVE; 661 error = netmap_attach_common(&sna->up); 662 if (error) 663 goto free_sna; 664 665 /* join the two endpoints */ 666 mna->peer = sna; 667 sna->peer = mna; 668 669 /* we already have a reference to the parent, but we 670 * need another one for the other endpoint we created 671 */ 672 netmap_adapter_get(pna); 673 /* likewise for the ifp, if any */ 674 if 
(ifp) 675 if_ref(ifp); 676 677 if (role == NR_REG_PIPE_MASTER) { 678 req = mna; 679 mna->peer_ref = 1; 680 netmap_adapter_get(&sna->up); 681 } else { 682 req = sna; 683 sna->peer_ref = 1; 684 netmap_adapter_get(&mna->up); 685 } 686 ND("created master %p and slave %p", mna, sna); 687 found: 688 689 ND("pipe %d %s at %p", pipe_id, 690 (req->role == NR_REG_PIPE_MASTER ? "master" : "slave"), req); 691 *na = &req->up; 692 netmap_adapter_get(*na); 693 694 /* keep the reference to the parent. 695 * It will be released by the req destructor 696 */ 697 698 return 0; 699 700 free_sna: 701 nm_os_free(sna); 702 unregister_mna: 703 netmap_pipe_remove(pna, mna); 704 free_mna: 705 nm_os_free(mna); 706 put_out: 707 netmap_unget_na(pna, ifp); 708 return error; 709 } 710 711 712 #endif /* WITH_PIPES */ 713