1 /* 2 * Copyright (c) 2002-2007 Niels Provos <[email protected]> 3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include "event2/event-config.h" 29 #include "evconfig-private.h" 30 31 #ifdef _WIN32 32 #include <winsock2.h> 33 #include <windows.h> 34 #include <io.h> 35 #endif 36 37 #ifdef EVENT__HAVE_VASPRINTF 38 /* If we have vasprintf, we need to define _GNU_SOURCE before we include 39 * stdio.h. This comes from evconfig-private.h. 
40 */ 41 #endif 42 43 #include <sys/types.h> 44 45 #ifdef EVENT__HAVE_SYS_TIME_H 46 #include <sys/time.h> 47 #endif 48 49 #ifdef EVENT__HAVE_SYS_SOCKET_H 50 #include <sys/socket.h> 51 #endif 52 53 #ifdef EVENT__HAVE_SYS_UIO_H 54 #include <sys/uio.h> 55 #endif 56 57 #ifdef EVENT__HAVE_SYS_IOCTL_H 58 #include <sys/ioctl.h> 59 #endif 60 61 #ifdef EVENT__HAVE_SYS_MMAN_H 62 #include <sys/mman.h> 63 #endif 64 65 #ifdef EVENT__HAVE_SYS_SENDFILE_H 66 #include <sys/sendfile.h> 67 #endif 68 #ifdef EVENT__HAVE_SYS_STAT_H 69 #include <sys/stat.h> 70 #endif 71 72 73 #include <errno.h> 74 #include <stdio.h> 75 #include <stdlib.h> 76 #include <string.h> 77 #ifdef EVENT__HAVE_STDARG_H 78 #include <stdarg.h> 79 #endif 80 #ifdef EVENT__HAVE_UNISTD_H 81 #include <unistd.h> 82 #endif 83 #include <limits.h> 84 85 #include "event2/event.h" 86 #include "event2/buffer.h" 87 #include "event2/buffer_compat.h" 88 #include "event2/bufferevent.h" 89 #include "event2/bufferevent_compat.h" 90 #include "event2/bufferevent_struct.h" 91 #include "event2/thread.h" 92 #include "log-internal.h" 93 #include "mm-internal.h" 94 #include "util-internal.h" 95 #include "evthread-internal.h" 96 #include "evbuffer-internal.h" 97 #include "bufferevent-internal.h" 98 99 /* some systems do not have MAP_FAILED */ 100 #ifndef MAP_FAILED 101 #define MAP_FAILED ((void *)-1) 102 #endif 103 104 /* send file support */ 105 #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__) 106 #define USE_SENDFILE 1 107 #define SENDFILE_IS_LINUX 1 108 #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__) 109 #define USE_SENDFILE 1 110 #define SENDFILE_IS_FREEBSD 1 111 #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__) 112 #define USE_SENDFILE 1 113 #define SENDFILE_IS_MACOSX 1 114 #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) 115 #define USE_SENDFILE 1 116 #define SENDFILE_IS_SOLARIS 1 117 #endif 118 119 /* Mask of user-selectable callback flags. 
*/ 120 #define EVBUFFER_CB_USER_FLAGS 0xffff 121 /* Mask of all internal-use-only flags. */ 122 #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000 123 124 /* Flag set if the callback is using the cb_obsolete function pointer */ 125 #define EVBUFFER_CB_OBSOLETE 0x00040000 126 127 /* evbuffer_chain support */ 128 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off) 129 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \ 130 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off)) 131 132 #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0) 133 #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0) 134 135 /* evbuffer_ptr support */ 136 #define PTR_NOT_FOUND(ptr) do { \ 137 (ptr)->pos = -1; \ 138 (ptr)->internal_.chain = NULL; \ 139 (ptr)->internal_.pos_in_chain = 0; \ 140 } while (0) 141 142 static void evbuffer_chain_align(struct evbuffer_chain *chain); 143 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain, 144 size_t datalen); 145 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg); 146 static int evbuffer_ptr_memcmp(const struct evbuffer *buf, 147 const struct evbuffer_ptr *pos, const char *mem, size_t len); 148 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf, 149 size_t datlen); 150 static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, 151 size_t howfar); 152 static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg); 153 static inline void evbuffer_chain_incref(struct evbuffer_chain *chain); 154 155 static struct evbuffer_chain * 156 evbuffer_chain_new(size_t size) 157 { 158 struct evbuffer_chain *chain; 159 size_t to_alloc; 160 161 size += EVBUFFER_CHAIN_SIZE; 162 163 /* get the next largest memory that can hold the buffer */ 164 to_alloc = MIN_BUFFER_SIZE; 165 while (to_alloc < size) 166 to_alloc <<= 1; 167 168 /* we get everything in one chunk */ 169 if ((chain = 
mm_malloc(to_alloc)) == NULL) 170 return (NULL); 171 172 memset(chain, 0, EVBUFFER_CHAIN_SIZE); 173 174 chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE; 175 176 /* this way we can manipulate the buffer to different addresses, 177 * which is required for mmap for example. 178 */ 179 chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain); 180 181 chain->refcnt = 1; 182 183 return (chain); 184 } 185 186 static inline void 187 evbuffer_chain_free(struct evbuffer_chain *chain) 188 { 189 EVUTIL_ASSERT(chain->refcnt > 0); 190 if (--chain->refcnt > 0) { 191 /* chain is still referenced by other chains */ 192 return; 193 } 194 195 if (CHAIN_PINNED(chain)) { 196 /* will get freed once no longer dangling */ 197 chain->refcnt++; 198 chain->flags |= EVBUFFER_DANGLING; 199 return; 200 } 201 202 /* safe to release chain, it's either a referencing 203 * chain or all references to it have been freed */ 204 if (chain->flags & EVBUFFER_REFERENCE) { 205 struct evbuffer_chain_reference *info = 206 EVBUFFER_CHAIN_EXTRA( 207 struct evbuffer_chain_reference, 208 chain); 209 if (info->cleanupfn) 210 (*info->cleanupfn)(chain->buffer, 211 chain->buffer_len, 212 info->extra); 213 } 214 if (chain->flags & EVBUFFER_FILESEGMENT) { 215 struct evbuffer_chain_file_segment *info = 216 EVBUFFER_CHAIN_EXTRA( 217 struct evbuffer_chain_file_segment, 218 chain); 219 if (info->segment) { 220 #ifdef _WIN32 221 if (info->segment->is_mapping) 222 UnmapViewOfFile(chain->buffer); 223 #endif 224 evbuffer_file_segment_free(info->segment); 225 } 226 } 227 if (chain->flags & EVBUFFER_MULTICAST) { 228 struct evbuffer_multicast_parent *info = 229 EVBUFFER_CHAIN_EXTRA( 230 struct evbuffer_multicast_parent, 231 chain); 232 /* referencing chain is being freed, decrease 233 * refcounts of source chain and associated 234 * evbuffer (which get freed once both reach 235 * zero) */ 236 EVUTIL_ASSERT(info->source != NULL); 237 EVUTIL_ASSERT(info->parent != NULL); 238 EVBUFFER_LOCK(info->source); 239 
evbuffer_chain_free(info->parent); 240 evbuffer_decref_and_unlock_(info->source); 241 } 242 243 mm_free(chain); 244 } 245 246 static void 247 evbuffer_free_all_chains(struct evbuffer_chain *chain) 248 { 249 struct evbuffer_chain *next; 250 for (; chain; chain = next) { 251 next = chain->next; 252 evbuffer_chain_free(chain); 253 } 254 } 255 256 #ifndef NDEBUG 257 static int 258 evbuffer_chains_all_empty(struct evbuffer_chain *chain) 259 { 260 for (; chain; chain = chain->next) { 261 if (chain->off) 262 return 0; 263 } 264 return 1; 265 } 266 #else 267 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid 268 "unused variable" warnings. */ 269 static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { 270 return 1; 271 } 272 #endif 273 274 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior 275 * to replacing them all with a new chain. Return a pointer to the place 276 * where the new chain will go. 277 * 278 * Internal; requires lock. The caller must fix up buf->last and buf->first 279 * as needed; they might have been freed. 280 */ 281 static struct evbuffer_chain ** 282 evbuffer_free_trailing_empty_chains(struct evbuffer *buf) 283 { 284 struct evbuffer_chain **ch = buf->last_with_datap; 285 /* Find the first victim chain. It might be *last_with_datap */ 286 while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) 287 ch = &(*ch)->next; 288 if (*ch) { 289 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); 290 evbuffer_free_all_chains(*ch); 291 *ch = NULL; 292 } 293 return ch; 294 } 295 296 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty 297 * chains as necessary. Requires lock. Does not schedule callbacks. 298 */ 299 static void 300 evbuffer_chain_insert(struct evbuffer *buf, 301 struct evbuffer_chain *chain) 302 { 303 ASSERT_EVBUFFER_LOCKED(buf); 304 if (*buf->last_with_datap == NULL) { 305 /* There are no chains data on the buffer at all. 
		 */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

/* Allocate a chain with room for 'datlen' bytes and append it to 'buf'.
 * Requires lock.  Returns the new chain, or NULL on allocation failure. */
static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

/* Set a pin flag on 'chain'; asserts the flag was not already set. */
void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

/* Clear a pin flag on 'chain'; if the chain was left dangling by an
 * earlier free attempt, release it now. */
void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

/* Take an additional reference on 'chain'.  Caller must hold whatever
 * lock protects the chain's owning buffer. */
static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

/* Allocate and initialize an empty evbuffer (refcnt 1, no chains). */
struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	/* No data yet, so the "last chain with data" slot is 'first'. */
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

/* Set the given user-visible flag bits on the buffer.  Always returns 0. */
int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

/* Clear the given user-visible flag bits on the buffer.  Always returns 0. */
int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

/* Take a reference on the buffer (lock is acquired and released here). */
void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

/* Take a reference on the buffer and return with its lock HELD;
 * caller must eventually unlock (e.g. via evbuffer_decref_and_unlock_). */
void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

/* Arrange for this buffer's callbacks to run deferred on 'base''s
 * deferred-callback queue instead of inline.  Always returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

/* Enable locking on the buffer.  If 'lock' is NULL a new recursive lock
 * is allocated and owned by the buffer; otherwise the caller's lock is
 * used and the caller retains ownership.  Returns 0 on success, -1 if
 * threads are disabled, allocation fails, or a lock is already set. */
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

/* Record the bufferevent that owns this buffer (used when deferring
 * callbacks so the parent stays alive). */
void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

/* Invoke the buffer's registered callbacks for any pending add/delete
 * byte counts.  'running_deferred' selects which callbacks fire:
 * deferred runs skip nothing extra, while an inline run on a deferred
 * buffer only fires NODEFER callbacks and preserves the counters for
 * the later deferred pass.  Requires lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them.
		 */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last run; nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the pre-change length from the pending deltas. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	    cbent != LIST_END(&buffer->callbacks);
	    cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

/* Fire (or schedule, for deferred buffers) this buffer's callbacks.
 * For deferred buffers a reference on the buffer and its parent
 * bufferevent is taken before scheduling so they outlive the queue run;
 * NODEFER callbacks still run inline via evbuffer_run_callbacks. */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		evbuffer_incref_and_lock_(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}

/* Deferred-queue trampoline: run the buffer's callbacks and drop the
 * references taken when the deferral was scheduled. */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref(parent);
}

/* Unlink and free every registered callback entry on the buffer. */
static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

/* Drop one reference and unlock; on the last reference, free all chains,
 * callbacks, any pending deferred callback, and the buffer itself. */
void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

/* Public destructor: releases the caller's reference. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}

/* Acquire the buffer's lock (no-op when locking is not enabled). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

/* Release the buffer's lock (no-op when locking is not enabled). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

/* Total number of bytes stored in the buffer. */
size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

/* Number of bytes readable without crossing a chain boundary, i.e. the
 * data held in the first chain (0 for an empty buffer). */
size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ?
	    chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

/* Append the contents of each iovec in 'vec' to the buffer.  Space for
 * the total is pre-reserved in at most two chains; returns the number of
 * bytes successfully appended (may be short on failure). */
size_t
evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
    EVBUFFER_UNLOCK(buf);
    return res;
}

/* Expose up to 'n_vecs' writable extents totalling at least 'size' bytes
 * for the caller to fill directly; commit with evbuffer_commit_space().
 * Returns the number of iovecs filled in, or -1 on failure or if the end
 * of the buffer is frozen. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		/* Single-vec case: guarantee one contiguous extent. */
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}

/* Walk last_with_datap forward past any following chains that hold data.
 * Returns how many steps it advanced.  Requires lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap =
		    &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}

/* Make 'vec[0..n_vecs)' bytes previously handed out by
 * evbuffer_reserve_space() part of the buffer's readable data.  The vecs
 * must match the reserved extents exactly (validated in pass 1 before
 * anything is committed).  Returns 0 on success, -1 on mismatch or if
 * the end of the buffer is frozen. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains.
	 */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* True iff the buffer's last chain is pinned for reading (i.e. a pending
 * read still references it). */
static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

/* Reset a buffer's chain bookkeeping to the empty state.  Does NOT free
 * chains; callers must have disposed of them already.  Requires lock. */
static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains. The first pinned chain is saved in first, and the
 * last in last. If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over.
	 */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src's chain list. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

/* Reattach the pinned chains saved by PRESERVE_PINNED after src's movable
 * contents have been handed off; src ends up holding only the pinned
 * (now empty) chains.  Requires lock. */
static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

/* Move src's entire chain list into (empty) dst by copying the head/tail
 * pointers; src's fields are left for the caller to reset.  Requires
 * both locks. */
static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	/* last_with_datap must point into dst's own storage when it
	 * referenced src's 'first' slot. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

/* Splice src's chain list onto the end of (non-empty) dst.  Requires
 * both locks; caller resets src afterwards. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

/* Append zero-copy views of src's data chains to dst.  Each view chain
 * shares the source chain's storage, pins the source chain immutable,
 * and holds references on both the chain and its owning evbuffer. */
static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced to */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		/* The view aliases the source chain's storage directly. */
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

/* Splice src's chain list in front of dst's.  Requires both locks;
 * caller resets src afterwards. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst had no data; src now supplies the last-with-data. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}

/* Move all data from inbuf to the end of outbuf without copying (except
 * for any read-pinned tail, which is preserved in inbuf).  Returns 0 on
 * success, -1 if frozen or the pinned-chain copy fails. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned,
	    &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Append a zero-copy reference of inbuf's contents to outbuf; inbuf's
 * data becomes immutable but stays in inbuf.  Fails (-1) if outbuf's end
 * is frozen, the buffers are the same, or any source chain is a file
 * segment / sendfile / multicast chain that cannot be re-referenced. */
int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type can not be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it.
		 */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Move all data from inbuf to the FRONT of outbuf without copying
 * (read-pinned tail preserved in inbuf).  Returns 0 on success, -1 if
 * either buffer's start is frozen or the pinned-chain copy fails. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Discard up to 'len' bytes from the front of the buffer.  Returns 0 on
 * success, -1 if the start of the buffer is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Fast path: draining everything with no read-pinned tail just
	 * frees every chain. */
	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	}
	else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		/* Free whole chains that are fully consumed; keep
		 * last_with_datap valid while doing so. */
		for (chain = buf->first;
		    remaining >= chain->off;
		    chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* Pinned chains cannot be freed; just mark
				 * their data consumed. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		/* Partially-consumed head chain: shift its start forward. */
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

/* Copy up to 'datlen' bytes from the front of the buffer without
 * draining them. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}

/* Copy up to 'datlen' bytes starting at position 'pos' (or the start of
 * the buffer when pos is NULL) into data_out, without draining.  Returns
 * the number of bytes copied, or -1 if the buffer's start is frozen. */
ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case.
	 */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		/* Clamp the request to what lies after 'pos'. */
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}


	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy whole chains while the request spans past each one. */
	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* Partial copy out of the final chain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case.
	 */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
1309 */ 1310 src->total_len -= nread; 1311 src->n_del_for_cb += nread; 1312 1313 if (nread) { 1314 evbuffer_invoke_callbacks(dst); 1315 evbuffer_invoke_callbacks(src); 1316 } 1317 result = (int)nread;/*XXXX should change return type */ 1318 1319 done: 1320 EVBUFFER_UNLOCK2(src, dst); 1321 return result; 1322 } 1323 1324 unsigned char * 1325 evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size) 1326 { 1327 struct evbuffer_chain *chain, *next, *tmp, *last_with_data; 1328 unsigned char *buffer, *result = NULL; 1329 ev_ssize_t remaining; 1330 int removed_last_with_data = 0; 1331 int removed_last_with_datap = 0; 1332 1333 EVBUFFER_LOCK(buf); 1334 1335 chain = buf->first; 1336 1337 if (size < 0) 1338 size = buf->total_len; 1339 /* if size > buf->total_len, we cannot guarantee to the user that she 1340 * is going to have a long enough buffer afterwards; so we return 1341 * NULL */ 1342 if (size == 0 || (size_t)size > buf->total_len) 1343 goto done; 1344 1345 /* No need to pull up anything; the first size bytes are 1346 * already here. */ 1347 if (chain->off >= (size_t)size) { 1348 result = chain->buffer + chain->misalign; 1349 goto done; 1350 } 1351 1352 /* Make sure that none of the chains we need to copy from is pinned. */ 1353 remaining = size - chain->off; 1354 EVUTIL_ASSERT(remaining >= 0); 1355 for (tmp=chain->next; tmp; tmp=tmp->next) { 1356 if (CHAIN_PINNED(tmp)) 1357 goto done; 1358 if (tmp->off >= (size_t)remaining) 1359 break; 1360 remaining -= tmp->off; 1361 } 1362 1363 if (CHAIN_PINNED(chain)) { 1364 size_t old_off = chain->off; 1365 if (CHAIN_SPACE_LEN(chain) < size - chain->off) { 1366 /* not enough room at end of chunk. 
*/ 1367 goto done; 1368 } 1369 buffer = CHAIN_SPACE_PTR(chain); 1370 tmp = chain; 1371 tmp->off = size; 1372 size -= old_off; 1373 chain = chain->next; 1374 } else if (chain->buffer_len - chain->misalign >= (size_t)size) { 1375 /* already have enough space in the first chain */ 1376 size_t old_off = chain->off; 1377 buffer = chain->buffer + chain->misalign + chain->off; 1378 tmp = chain; 1379 tmp->off = size; 1380 size -= old_off; 1381 chain = chain->next; 1382 } else { 1383 if ((tmp = evbuffer_chain_new(size)) == NULL) { 1384 event_warn("%s: out of memory", __func__); 1385 goto done; 1386 } 1387 buffer = tmp->buffer; 1388 tmp->off = size; 1389 buf->first = tmp; 1390 } 1391 1392 /* TODO(niels): deal with buffers that point to NULL like sendfile */ 1393 1394 /* Copy and free every chunk that will be entirely pulled into tmp */ 1395 last_with_data = *buf->last_with_datap; 1396 for (; chain != NULL && (size_t)size >= chain->off; chain = next) { 1397 next = chain->next; 1398 1399 memcpy(buffer, chain->buffer + chain->misalign, chain->off); 1400 size -= chain->off; 1401 buffer += chain->off; 1402 if (chain == last_with_data) 1403 removed_last_with_data = 1; 1404 if (&chain->next == buf->last_with_datap) 1405 removed_last_with_datap = 1; 1406 1407 evbuffer_chain_free(chain); 1408 } 1409 1410 if (chain != NULL) { 1411 memcpy(buffer, chain->buffer + chain->misalign, size); 1412 chain->misalign += size; 1413 chain->off -= size; 1414 } else { 1415 buf->last = tmp; 1416 } 1417 1418 tmp->next = chain; 1419 1420 if (removed_last_with_data) { 1421 buf->last_with_datap = &buf->first; 1422 } else if (removed_last_with_datap) { 1423 if (buf->first->next && buf->first->next->off) 1424 buf->last_with_datap = &buf->first->next; 1425 else 1426 buf->last_with_datap = &buf->first; 1427 } 1428 1429 result = (tmp->buffer + tmp->misalign); 1430 1431 done: 1432 EVBUFFER_UNLOCK(buf); 1433 return result; 1434 } 1435 1436 /* 1437 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or 
'\n'. 1438 * The returned buffer needs to be freed by the called. 1439 */ 1440 char * 1441 evbuffer_readline(struct evbuffer *buffer) 1442 { 1443 return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); 1444 } 1445 1446 static inline ev_ssize_t 1447 evbuffer_strchr(struct evbuffer_ptr *it, const char chr) 1448 { 1449 struct evbuffer_chain *chain = it->internal_.chain; 1450 size_t i = it->internal_.pos_in_chain; 1451 while (chain != NULL) { 1452 char *buffer = (char *)chain->buffer + chain->misalign; 1453 char *cp = memchr(buffer+i, chr, chain->off-i); 1454 if (cp) { 1455 it->internal_.chain = chain; 1456 it->internal_.pos_in_chain = cp - buffer; 1457 it->pos += (cp - buffer - i); 1458 return it->pos; 1459 } 1460 it->pos += chain->off - i; 1461 i = 0; 1462 chain = chain->next; 1463 } 1464 1465 return (-1); 1466 } 1467 1468 static inline char * 1469 find_eol_char(char *s, size_t len) 1470 { 1471 #define CHUNK_SZ 128 1472 /* Lots of benchmarking found this approach to be faster in practice 1473 * than doing two memchrs over the whole buffer, doin a memchr on each 1474 * char of the buffer, or trying to emulate memchr by hand. */ 1475 char *s_end, *cr, *lf; 1476 s_end = s+len; 1477 while (s < s_end) { 1478 size_t chunk = (s + CHUNK_SZ < s_end) ? 
CHUNK_SZ : (s_end - s); 1479 cr = memchr(s, '\r', chunk); 1480 lf = memchr(s, '\n', chunk); 1481 if (cr) { 1482 if (lf && lf < cr) 1483 return lf; 1484 return cr; 1485 } else if (lf) { 1486 return lf; 1487 } 1488 s += CHUNK_SZ; 1489 } 1490 1491 return NULL; 1492 #undef CHUNK_SZ 1493 } 1494 1495 static ev_ssize_t 1496 evbuffer_find_eol_char(struct evbuffer_ptr *it) 1497 { 1498 struct evbuffer_chain *chain = it->internal_.chain; 1499 size_t i = it->internal_.pos_in_chain; 1500 while (chain != NULL) { 1501 char *buffer = (char *)chain->buffer + chain->misalign; 1502 char *cp = find_eol_char(buffer+i, chain->off-i); 1503 if (cp) { 1504 it->internal_.chain = chain; 1505 it->internal_.pos_in_chain = cp - buffer; 1506 it->pos += (cp - buffer) - i; 1507 return it->pos; 1508 } 1509 it->pos += chain->off - i; 1510 i = 0; 1511 chain = chain->next; 1512 } 1513 1514 return (-1); 1515 } 1516 1517 static inline int 1518 evbuffer_strspn( 1519 struct evbuffer_ptr *ptr, const char *chrset) 1520 { 1521 int count = 0; 1522 struct evbuffer_chain *chain = ptr->internal_.chain; 1523 size_t i = ptr->internal_.pos_in_chain; 1524 1525 if (!chain) 1526 return 0; 1527 1528 while (1) { 1529 char *buffer = (char *)chain->buffer + chain->misalign; 1530 for (; i < chain->off; ++i) { 1531 const char *p = chrset; 1532 while (*p) { 1533 if (buffer[i] == *p++) 1534 goto next; 1535 } 1536 ptr->internal_.chain = chain; 1537 ptr->internal_.pos_in_chain = i; 1538 ptr->pos += count; 1539 return count; 1540 next: 1541 ++count; 1542 } 1543 i = 0; 1544 1545 if (! 
chain->next) { 1546 ptr->internal_.chain = chain; 1547 ptr->internal_.pos_in_chain = i; 1548 ptr->pos += count; 1549 return count; 1550 } 1551 1552 chain = chain->next; 1553 } 1554 } 1555 1556 1557 static inline int 1558 evbuffer_getchr(struct evbuffer_ptr *it) 1559 { 1560 struct evbuffer_chain *chain = it->internal_.chain; 1561 size_t off = it->internal_.pos_in_chain; 1562 1563 if (chain == NULL) 1564 return -1; 1565 1566 return (unsigned char)chain->buffer[chain->misalign + off]; 1567 } 1568 1569 struct evbuffer_ptr 1570 evbuffer_search_eol(struct evbuffer *buffer, 1571 struct evbuffer_ptr *start, size_t *eol_len_out, 1572 enum evbuffer_eol_style eol_style) 1573 { 1574 struct evbuffer_ptr it, it2; 1575 size_t extra_drain = 0; 1576 int ok = 0; 1577 1578 /* Avoid locking in trivial edge cases */ 1579 if (start && start->internal_.chain == NULL) { 1580 PTR_NOT_FOUND(&it); 1581 if (eol_len_out) 1582 *eol_len_out = extra_drain; 1583 return it; 1584 } 1585 1586 EVBUFFER_LOCK(buffer); 1587 1588 if (start) { 1589 memcpy(&it, start, sizeof(it)); 1590 } else { 1591 it.pos = 0; 1592 it.internal_.chain = buffer->first; 1593 it.internal_.pos_in_chain = 0; 1594 } 1595 1596 /* the eol_style determines our first stop character and how many 1597 * characters we are going to drain afterwards. */ 1598 switch (eol_style) { 1599 case EVBUFFER_EOL_ANY: 1600 if (evbuffer_find_eol_char(&it) < 0) 1601 goto done; 1602 memcpy(&it2, &it, sizeof(it)); 1603 extra_drain = evbuffer_strspn(&it2, "\r\n"); 1604 break; 1605 case EVBUFFER_EOL_CRLF_STRICT: { 1606 it = evbuffer_search(buffer, "\r\n", 2, &it); 1607 if (it.pos < 0) 1608 goto done; 1609 extra_drain = 2; 1610 break; 1611 } 1612 case EVBUFFER_EOL_CRLF: { 1613 ev_ssize_t start_pos = it.pos; 1614 /* Look for a LF ... */ 1615 if (evbuffer_strchr(&it, '\n') < 0) 1616 goto done; 1617 extra_drain = 1; 1618 /* ... optionally preceeded by a CR. 
*/ 1619 if (it.pos == start_pos) 1620 break; /* If the first character is \n, don't back up */ 1621 /* This potentially does an extra linear walk over the first 1622 * few chains. Probably, that's not too expensive unless you 1623 * have a really pathological setup. */ 1624 memcpy(&it2, &it, sizeof(it)); 1625 if (evbuffer_ptr_subtract(buffer, &it2, 1)<0) 1626 break; 1627 if (evbuffer_getchr(&it2) == '\r') { 1628 memcpy(&it, &it2, sizeof(it)); 1629 extra_drain = 2; 1630 } 1631 break; 1632 } 1633 case EVBUFFER_EOL_LF: 1634 if (evbuffer_strchr(&it, '\n') < 0) 1635 goto done; 1636 extra_drain = 1; 1637 break; 1638 case EVBUFFER_EOL_NUL: 1639 if (evbuffer_strchr(&it, '\0') < 0) 1640 goto done; 1641 extra_drain = 1; 1642 break; 1643 default: 1644 goto done; 1645 } 1646 1647 ok = 1; 1648 done: 1649 EVBUFFER_UNLOCK(buffer); 1650 1651 if (!ok) 1652 PTR_NOT_FOUND(&it); 1653 if (eol_len_out) 1654 *eol_len_out = extra_drain; 1655 1656 return it; 1657 } 1658 1659 char * 1660 evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out, 1661 enum evbuffer_eol_style eol_style) 1662 { 1663 struct evbuffer_ptr it; 1664 char *line; 1665 size_t n_to_copy=0, extra_drain=0; 1666 char *result = NULL; 1667 1668 EVBUFFER_LOCK(buffer); 1669 1670 if (buffer->freeze_start) { 1671 goto done; 1672 } 1673 1674 it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style); 1675 if (it.pos < 0) 1676 goto done; 1677 n_to_copy = it.pos; 1678 1679 if ((line = mm_malloc(n_to_copy+1)) == NULL) { 1680 event_warn("%s: out of memory", __func__); 1681 goto done; 1682 } 1683 1684 evbuffer_remove(buffer, line, n_to_copy); 1685 line[n_to_copy] = '\0'; 1686 1687 evbuffer_drain(buffer, extra_drain); 1688 result = line; 1689 done: 1690 EVBUFFER_UNLOCK(buffer); 1691 1692 if (n_read_out) 1693 *n_read_out = result ? 
n_to_copy : 0; 1694 1695 return result; 1696 } 1697 1698 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096 1699 1700 /* Adds data to an event buffer */ 1701 1702 int 1703 evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen) 1704 { 1705 struct evbuffer_chain *chain, *tmp; 1706 const unsigned char *data = data_in; 1707 size_t remain, to_alloc; 1708 int result = -1; 1709 1710 EVBUFFER_LOCK(buf); 1711 1712 if (buf->freeze_end) { 1713 goto done; 1714 } 1715 1716 chain = buf->last; 1717 1718 /* If there are no chains allocated for this buffer, allocate one 1719 * big enough to hold all the data. */ 1720 if (chain == NULL) { 1721 chain = evbuffer_chain_new(datlen); 1722 if (!chain) 1723 goto done; 1724 evbuffer_chain_insert(buf, chain); 1725 } 1726 1727 if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { 1728 remain = (size_t)(chain->buffer_len - chain->misalign - chain->off); 1729 if (remain >= datlen) { 1730 /* there's enough space to hold all the data in the 1731 * current last chain */ 1732 memcpy(chain->buffer + chain->misalign + chain->off, 1733 data, datlen); 1734 chain->off += datlen; 1735 buf->total_len += datlen; 1736 buf->n_add_for_cb += datlen; 1737 goto out; 1738 } else if (!CHAIN_PINNED(chain) && 1739 evbuffer_chain_should_realign(chain, datlen)) { 1740 /* we can fit the data into the misalignment */ 1741 evbuffer_chain_align(chain); 1742 1743 memcpy(chain->buffer + chain->off, data, datlen); 1744 chain->off += datlen; 1745 buf->total_len += datlen; 1746 buf->n_add_for_cb += datlen; 1747 goto out; 1748 } 1749 } else { 1750 /* we cannot write any data to the last chain */ 1751 remain = 0; 1752 } 1753 1754 /* we need to add another chain */ 1755 to_alloc = chain->buffer_len; 1756 if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2) 1757 to_alloc <<= 1; 1758 if (datlen > to_alloc) 1759 to_alloc = datlen; 1760 tmp = evbuffer_chain_new(to_alloc); 1761 if (tmp == NULL) 1762 goto done; 1763 1764 if (remain) { 1765 memcpy(chain->buffer + chain->misalign + 
chain->off, 1766 data, remain); 1767 chain->off += remain; 1768 buf->total_len += remain; 1769 buf->n_add_for_cb += remain; 1770 } 1771 1772 data += remain; 1773 datlen -= remain; 1774 1775 memcpy(tmp->buffer, data, datlen); 1776 tmp->off = datlen; 1777 evbuffer_chain_insert(buf, tmp); 1778 buf->n_add_for_cb += datlen; 1779 1780 out: 1781 evbuffer_invoke_callbacks(buf); 1782 result = 0; 1783 done: 1784 EVBUFFER_UNLOCK(buf); 1785 return result; 1786 } 1787 1788 int 1789 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) 1790 { 1791 struct evbuffer_chain *chain, *tmp; 1792 int result = -1; 1793 1794 EVBUFFER_LOCK(buf); 1795 1796 if (buf->freeze_start) { 1797 goto done; 1798 } 1799 1800 chain = buf->first; 1801 1802 if (chain == NULL) { 1803 chain = evbuffer_chain_new(datlen); 1804 if (!chain) 1805 goto done; 1806 evbuffer_chain_insert(buf, chain); 1807 } 1808 1809 /* we cannot touch immutable buffers */ 1810 if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { 1811 /* If this chain is empty, we can treat it as 1812 * 'empty at the beginning' rather than 'empty at the end' */ 1813 if (chain->off == 0) 1814 chain->misalign = chain->buffer_len; 1815 1816 if ((size_t)chain->misalign >= datlen) { 1817 /* we have enough space to fit everything */ 1818 memcpy(chain->buffer + chain->misalign - datlen, 1819 data, datlen); 1820 chain->off += datlen; 1821 chain->misalign -= datlen; 1822 buf->total_len += datlen; 1823 buf->n_add_for_cb += datlen; 1824 goto out; 1825 } else if (chain->misalign) { 1826 /* we can only fit some of the data. 
*/ 1827 memcpy(chain->buffer, 1828 (char*)data + datlen - chain->misalign, 1829 (size_t)chain->misalign); 1830 chain->off += (size_t)chain->misalign; 1831 buf->total_len += (size_t)chain->misalign; 1832 buf->n_add_for_cb += (size_t)chain->misalign; 1833 datlen -= (size_t)chain->misalign; 1834 chain->misalign = 0; 1835 } 1836 } 1837 1838 /* we need to add another chain */ 1839 if ((tmp = evbuffer_chain_new(datlen)) == NULL) 1840 goto done; 1841 buf->first = tmp; 1842 if (buf->last_with_datap == &buf->first) 1843 buf->last_with_datap = &tmp->next; 1844 1845 tmp->next = chain; 1846 1847 tmp->off = datlen; 1848 tmp->misalign = tmp->buffer_len - datlen; 1849 1850 memcpy(tmp->buffer + tmp->misalign, data, datlen); 1851 buf->total_len += datlen; 1852 buf->n_add_for_cb += (size_t)chain->misalign; 1853 1854 out: 1855 evbuffer_invoke_callbacks(buf); 1856 result = 0; 1857 done: 1858 EVBUFFER_UNLOCK(buf); 1859 return result; 1860 } 1861 1862 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ 1863 static void 1864 evbuffer_chain_align(struct evbuffer_chain *chain) 1865 { 1866 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); 1867 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); 1868 memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); 1869 chain->misalign = 0; 1870 } 1871 1872 #define MAX_TO_COPY_IN_EXPAND 4096 1873 #define MAX_TO_REALIGN_IN_EXPAND 2048 1874 1875 /** Helper: return true iff we should realign chain to fit datalen bytes of 1876 data in it. */ 1877 static int 1878 evbuffer_chain_should_realign(struct evbuffer_chain *chain, 1879 size_t datlen) 1880 { 1881 return chain->buffer_len - chain->off >= datlen && 1882 (chain->off < chain->buffer_len / 2) && 1883 (chain->off <= MAX_TO_REALIGN_IN_EXPAND); 1884 } 1885 1886 /* Expands the available space in the event buffer to at least datlen, all in 1887 * a single chunk. Return that chunk. 
*/ 1888 static struct evbuffer_chain * 1889 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) 1890 { 1891 struct evbuffer_chain *chain, **chainp; 1892 struct evbuffer_chain *result = NULL; 1893 ASSERT_EVBUFFER_LOCKED(buf); 1894 1895 chainp = buf->last_with_datap; 1896 1897 /* XXX If *chainp is no longer writeable, but has enough space in its 1898 * misalign, this might be a bad idea: we could still use *chainp, not 1899 * (*chainp)->next. */ 1900 if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) 1901 chainp = &(*chainp)->next; 1902 1903 /* 'chain' now points to the first chain with writable space (if any) 1904 * We will either use it, realign it, replace it, or resize it. */ 1905 chain = *chainp; 1906 1907 if (chain == NULL || 1908 (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { 1909 /* We can't use the last_with_data chain at all. Just add a 1910 * new one that's big enough. */ 1911 goto insert_new; 1912 } 1913 1914 /* If we can fit all the data, then we don't have to do anything */ 1915 if (CHAIN_SPACE_LEN(chain) >= datlen) { 1916 result = chain; 1917 goto ok; 1918 } 1919 1920 /* If the chain is completely empty, just replace it by adding a new 1921 * empty chain. */ 1922 if (chain->off == 0) { 1923 goto insert_new; 1924 } 1925 1926 /* If the misalignment plus the remaining space fulfills our data 1927 * needs, we could just force an alignment to happen. Afterwards, we 1928 * have enough space. But only do this if we're saving a lot of space 1929 * and not moving too much data. Otherwise the space savings are 1930 * probably offset by the time lost in copying. 1931 */ 1932 if (evbuffer_chain_should_realign(chain, datlen)) { 1933 evbuffer_chain_align(chain); 1934 result = chain; 1935 goto ok; 1936 } 1937 1938 /* At this point, we can either resize the last chunk with space in 1939 * it, use the next chunk after it, or If we add a new chunk, we waste 1940 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. 
If we 1941 * resize, we have to copy chain->off bytes. 1942 */ 1943 1944 /* Would expanding this chunk be affordable and worthwhile? */ 1945 if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || 1946 chain->off > MAX_TO_COPY_IN_EXPAND) { 1947 /* It's not worth resizing this chain. Can the next one be 1948 * used? */ 1949 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { 1950 /* Yes, we can just use the next chain (which should 1951 * be empty. */ 1952 result = chain->next; 1953 goto ok; 1954 } else { 1955 /* No; append a new chain (which will free all 1956 * terminal empty chains.) */ 1957 goto insert_new; 1958 } 1959 } else { 1960 /* Okay, we're going to try to resize this chain: Not doing so 1961 * would waste at least 1/8 of its current allocation, and we 1962 * can do so without having to copy more than 1963 * MAX_TO_COPY_IN_EXPAND bytes. */ 1964 /* figure out how much space we need */ 1965 size_t length = chain->off + datlen; 1966 struct evbuffer_chain *tmp = evbuffer_chain_new(length); 1967 if (tmp == NULL) 1968 goto err; 1969 1970 /* copy the data over that we had so far */ 1971 tmp->off = chain->off; 1972 memcpy(tmp->buffer, chain->buffer + chain->misalign, 1973 chain->off); 1974 /* fix up the list */ 1975 EVUTIL_ASSERT(*chainp == chain); 1976 result = *chainp = tmp; 1977 1978 if (buf->last == chain) 1979 buf->last = tmp; 1980 1981 tmp->next = chain->next; 1982 evbuffer_chain_free(chain); 1983 goto ok; 1984 } 1985 1986 insert_new: 1987 result = evbuffer_chain_insert_new(buf, datlen); 1988 if (!result) 1989 goto err; 1990 ok: 1991 EVUTIL_ASSERT(result); 1992 EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); 1993 err: 1994 return result; 1995 } 1996 1997 /* Make sure that datlen bytes are available for writing in the last n 1998 * chains. Never copies or moves data. 
*/ 1999 int 2000 evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n) 2001 { 2002 struct evbuffer_chain *chain = buf->last, *tmp, *next; 2003 size_t avail; 2004 int used; 2005 2006 ASSERT_EVBUFFER_LOCKED(buf); 2007 EVUTIL_ASSERT(n >= 2); 2008 2009 if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { 2010 /* There is no last chunk, or we can't touch the last chunk. 2011 * Just add a new chunk. */ 2012 chain = evbuffer_chain_new(datlen); 2013 if (chain == NULL) 2014 return (-1); 2015 2016 evbuffer_chain_insert(buf, chain); 2017 return (0); 2018 } 2019 2020 used = 0; /* number of chains we're using space in. */ 2021 avail = 0; /* how much space they have. */ 2022 /* How many bytes can we stick at the end of buffer as it is? Iterate 2023 * over the chains at the end of the buffer, tring to see how much 2024 * space we have in the first n. */ 2025 for (chain = *buf->last_with_datap; chain; chain = chain->next) { 2026 if (chain->off) { 2027 size_t space = (size_t) CHAIN_SPACE_LEN(chain); 2028 EVUTIL_ASSERT(chain == *buf->last_with_datap); 2029 if (space) { 2030 avail += space; 2031 ++used; 2032 } 2033 } else { 2034 /* No data in chain; realign it. */ 2035 chain->misalign = 0; 2036 avail += chain->buffer_len; 2037 ++used; 2038 } 2039 if (avail >= datlen) { 2040 /* There is already enough space. Just return */ 2041 return (0); 2042 } 2043 if (used == n) 2044 break; 2045 } 2046 2047 /* There wasn't enough space in the first n chains with space in 2048 * them. Either add a new chain with enough space, or replace all 2049 * empty chains with one that has enough space, depending on n. */ 2050 if (used < n) { 2051 /* The loop ran off the end of the chains before it hit n 2052 * chains; we can add another. 
*/ 2053 EVUTIL_ASSERT(chain == NULL); 2054 2055 tmp = evbuffer_chain_new(datlen - avail); 2056 if (tmp == NULL) 2057 return (-1); 2058 2059 buf->last->next = tmp; 2060 buf->last = tmp; 2061 /* (we would only set last_with_data if we added the first 2062 * chain. But if the buffer had no chains, we would have 2063 * just allocated a new chain earlier) */ 2064 return (0); 2065 } else { 2066 /* Nuke _all_ the empty chains. */ 2067 int rmv_all = 0; /* True iff we removed last_with_data. */ 2068 chain = *buf->last_with_datap; 2069 if (!chain->off) { 2070 EVUTIL_ASSERT(chain == buf->first); 2071 rmv_all = 1; 2072 avail = 0; 2073 } else { 2074 avail = (size_t) CHAIN_SPACE_LEN(chain); 2075 chain = chain->next; 2076 } 2077 2078 2079 for (; chain; chain = next) { 2080 next = chain->next; 2081 EVUTIL_ASSERT(chain->off == 0); 2082 evbuffer_chain_free(chain); 2083 } 2084 tmp = evbuffer_chain_new(datlen - avail); 2085 if (tmp == NULL) { 2086 if (rmv_all) { 2087 ZERO_CHAIN(buf); 2088 } else { 2089 buf->last = *buf->last_with_datap; 2090 (*buf->last_with_datap)->next = NULL; 2091 } 2092 return (-1); 2093 } 2094 2095 if (rmv_all) { 2096 buf->first = buf->last = tmp; 2097 buf->last_with_datap = &buf->first; 2098 } else { 2099 (*buf->last_with_datap)->next = tmp; 2100 buf->last = tmp; 2101 } 2102 return (0); 2103 } 2104 } 2105 2106 int 2107 evbuffer_expand(struct evbuffer *buf, size_t datlen) 2108 { 2109 struct evbuffer_chain *chain; 2110 2111 EVBUFFER_LOCK(buf); 2112 chain = evbuffer_expand_singlechain(buf, datlen); 2113 EVBUFFER_UNLOCK(buf); 2114 return chain ? 0 : -1; 2115 } 2116 2117 /* 2118 * Reads data from a file descriptor into a buffer. 
 */

#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef EVENT__HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

/* Clamp the writev vector count to the OS's declared limit, when it is
 * smaller than our default. */
#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
/* No sys/uio.h: we are on win32, where the scatter/gather type is WSABUF
 * (used with WSARecv/WSASend). */
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
/* When reading, scatter into at most this many chains at the buffer's end. */
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ 4096

/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	/* Fill vecs[] with the free region of each successive chain until we
	 * run out of vectors or have offered 'howmuch' bytes. */
	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		/* In 'exact' mode, never offer more than howmuch bytes in
		 * total, even if the chain has more free room. */
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}

/* Returns the number of bytes readable on the socket right now, per
 * FIONREAD, or EVBUFFER_MAX_READ when the platform gives no answer.
 * Returns -1 if the ioctl itself fails. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(_WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}

/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch?
*/ 2226 int 2227 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2228 { 2229 struct evbuffer_chain **chainp; 2230 int n; 2231 int result; 2232 2233 #ifdef USE_IOVEC_IMPL 2234 int nvecs, i, remaining; 2235 #else 2236 struct evbuffer_chain *chain; 2237 unsigned char *p; 2238 #endif 2239 2240 EVBUFFER_LOCK(buf); 2241 2242 if (buf->freeze_end) { 2243 result = -1; 2244 goto done; 2245 } 2246 2247 n = get_n_bytes_readable_on_socket(fd); 2248 if (n <= 0 || n > EVBUFFER_MAX_READ) 2249 n = EVBUFFER_MAX_READ; 2250 if (howmuch < 0 || howmuch > n) 2251 howmuch = n; 2252 2253 #ifdef USE_IOVEC_IMPL 2254 /* Since we can use iovecs, we're willing to use the last 2255 * NUM_READ_IOVEC chains. */ 2256 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2257 result = -1; 2258 goto done; 2259 } else { 2260 IOV_TYPE vecs[NUM_READ_IOVEC]; 2261 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2262 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2263 NUM_READ_IOVEC, &chainp, 1); 2264 #else 2265 /* We aren't using the native struct iovec. Therefore, 2266 we are on win32. */ 2267 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2268 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2269 &chainp, 1); 2270 2271 for (i=0; i < nvecs; ++i) 2272 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2273 #endif 2274 2275 #ifdef _WIN32 2276 { 2277 DWORD bytesRead; 2278 DWORD flags=0; 2279 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2280 /* The read failed. It might be a close, 2281 * or it might be an error. */ 2282 if (WSAGetLastError() == WSAECONNABORTED) 2283 n = 0; 2284 else 2285 n = -1; 2286 } else 2287 n = bytesRead; 2288 } 2289 #else 2290 n = readv(fd, vecs, nvecs); 2291 #endif 2292 } 2293 2294 #else /*!USE_IOVEC_IMPL*/ 2295 /* If we don't have FIONREAD, we might waste some space here */ 2296 /* XXX we _will_ waste some space here if there is any space left 2297 * over on buf->last. 
 */
	/* Make sure the last chain has room for the whole read. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef _WIN32
	n = read(fd, p, howmuch);
#else
	/* On Windows the fd is a socket, so use recv() rather than read(). */
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Distribute the n bytes we just read over the chains whose free
	 * space was exposed as iovecs, bumping each chain's 'off'. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			/* This chain is now completely full. */
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			/* Last chain touched: it absorbs the remainder and
			 * becomes the new last-with-data chain. */
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

#ifdef USE_IOVEC_IMPL
/* Write at most 'howmuch' bytes from 'buffer' to 'fd' with a single
 * writev()/WSASend() call spanning up to NUM_WRITE_IOVEC chains.
 * Requires the buffer lock.  Returns the number of bytes written, or -1
 * on error.  Stops early at a sendfile chain, which writev cannot send. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length? if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
#ifdef _WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif

#ifdef USE_SENDFILE
/* Write the first chain of 'buffer' (which must be a sendfile chain) to
 * 'dest_fd' using the platform's sendfile() variant.  Requires the buffer
 * lock.  Returns the number of bytes written, 0 on a retriable error
 * (EAGAIN/EINTR), or -1 on a hard error.
 * NOTE(review): 'howmuch' is accepted but not used on any platform branch
 * here -- the whole first chain is offered to sendfile(); confirm callers
 * rely on evbuffer_drain() of the return value rather than the cap. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_file_segment *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
		chain);
	const int source_fd = info->segment->fd;
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	ev_off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	ev_off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
	/* On macOS, 'len' is updated in place with the bytes actually sent. */
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(dest_fd, source_fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(dest_fd, source_fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Solaris can advance 'offset' even when it reports
			 * failure; report any partial progress. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif

/* Write up to 'howmuch' bytes from 'buffer' to 'fd', draining whatever was
 * actually written.  A negative 'howmuch' means "everything".  Returns the
 * number of bytes written, or -1 on error (including a frozen front). */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	/* Clamp the request to the data we actually hold. */
	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(_WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		/* NOTE(review): evbuffer_pullup() can return NULL if its
		 * allocation fails -- confirm whether that is possible here. */
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove the bytes that made it out of the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

/* Write the whole buffer to 'fd'; see evbuffer_write_atmost(). */
int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}

/* Find the first occurrence of the 'len' bytes at 'what' in 'buffer'.
 * Returns a pointer into the (linearized) buffer on success, NULL if not
 * found.  Note: pulls the match's prefix into one chain, so this can
 * copy/rearrange buffer memory. */
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		/* Linearize everything up to and including the match so we
		 * can hand back a contiguous pointer. */
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}

/* Subtract <b>howfar</b> from the position of <b>pos</b> within
 * <b>buf</b>. Returns 0 on success, -1 on failure.
 *
 * This isn't exposed yet, because of potential inefficiency issues.
 * Maybe it should be. */
static int
evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar)
{
	if (howfar > (size_t)pos->pos)
		return -1;
	if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
		/* Fast path: we stay within the current chain. */
		pos->internal_.pos_in_chain -= howfar;
		pos->pos -= howfar;
		return 0;
	} else {
		const size_t newpos = pos->pos - howfar;
		/* Here's the inefficient part: it walks over the
		 * chains until we hit newpos. */
		return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
	}
}

/* Set or advance 'pos' within 'buf'.  EVBUFFER_PTR_SET seeks to absolute
 * 'position' from the start; EVBUFFER_PTR_ADD advances from the pointer's
 * current chain, avoiding a rescan of earlier chains.  Returns 0 on
 * success; on overrun, marks the pointer not-found and returns -1. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;
	int result = 0;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->internal_.chain;
		pos->pos += position;
		position = pos->internal_.pos_in_chain;
		break;
	}

	/* Walk forward until the target offset lands inside a chain. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->internal_.chain = chain;
		pos->internal_.pos_in_chain = position + left;
	} else if (left == 0) {
		/* The first byte in the (nonexistent) chain after the last chain */
		pos->internal_.chain = NULL;
		pos->internal_.pos_in_chain = 0;
	} else {
		PTR_NOT_FOUND(pos);
		result = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return result;
}

/**
   Compare the bytes in buf at position pos to the len bytes in mem. Return
   less than 0, 0, or greater than 0 as memcmp.

   NOTE(review): -1 is also returned when fewer than 'len' bytes remain
   after 'pos', so callers can only test for equality (== 0) reliably.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->internal_.chain;
	position = pos->internal_.pos_in_chain;
	/* Compare chain by chain; each iteration handles the portion of
	 * 'mem' that overlaps the current chain. */
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}

/* Search for 'what' in 'buffer' starting at 'start' (or the beginning).
 * Thin wrapper over evbuffer_search_range() with no end bound. */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

/* Search for the 'len' bytes at 'what' between 'start' and 'end' (either
 * may be NULL for the buffer's own bounds).  Returns an evbuffer_ptr at
 * the first match, or one with pos == -1 if there is no match. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos.internal_.chain;
	} else {
		pos.pos = 0;
		chain = pos.internal_.chain = buffer->first;
		pos.internal_.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->internal_.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	/* Scan each chain for the first byte of the needle; on a hit,
	 * verify the whole needle with evbuffer_ptr_memcmp(). */
	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos.internal_.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos.internal_.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos.internal_.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* A match that extends past 'end' counts as
				 * no match at all. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False alarm: step past this byte and continue. */
			++pos.pos;
			++pos.internal_.pos_in_chain;
			if (pos.internal_.pos_in_chain == chain->off) {
				chain = pos.internal_.chain = chain->next;
				pos.internal_.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos.internal_.pos_in_chain;
			chain = pos.internal_.chain = chain->next;
			pos.internal_.pos_in_chain = 0;
		}
	}

not_found:
	PTR_NOT_FOUND(&pos);
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}

/* Fill up to 'n_vec' iovecs in 'vec' with pointers to at least 'len' bytes
 * of buffer data ('len' < 0 means "all"), without copying or draining.
 * Returns the number of iovecs needed (which may exceed n_vec). */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	/* Avoid locking in trivial edge cases */
	if (start_at && start_at->internal_.chain == NULL)
		return 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* The first iovec covers the tail of the starting chain,
		 * from the pointer's offset to the end of its data. */
		chain = start_at->internal_.chain;
		len_so_far = chain->off
		    - start_at->internal_.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->internal_.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* Out of caller-provided vectors and no explicit
			 * length requested: stop counting. */
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}


/* Append printf-style formatted output to 'buf' using a va_list.
 * Retries with a larger chain when the formatted result does not fit.
 * Returns the number of bytes added, or -1 on error (including a frozen
 * end or allocation failure). */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Copy the va_list: each vsnprintf attempt consumes it. */
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* It fit (sz excludes the terminating NUL). */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Truncated: grow to the exact size needed and try again. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Append printf-style formatted output to 'buf'.
 * Returns the number of bytes added, or -1 on error. */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}

/* Append a reference to 'datlen' bytes at 'data' without copying them.
 * 'cleanupfn' (with 'extra') is invoked when the chain is released.
 * The caller must keep 'data' alive and unmodified until then.
 * Returns 0 on success, -1 on failure. */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}

/* TODO(niels): we may want to add to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
2879 */ 2880 struct evbuffer_file_segment * 2881 evbuffer_file_segment_new( 2882 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2883 { 2884 struct evbuffer_file_segment *seg = 2885 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2886 if (!seg) 2887 return NULL; 2888 seg->refcnt = 1; 2889 seg->fd = fd; 2890 seg->flags = flags; 2891 seg->file_offset = offset; 2892 2893 #ifdef _WIN32 2894 #define lseek _lseeki64 2895 #define fstat _fstat 2896 #define stat _stat 2897 #endif 2898 if (length == -1) { 2899 struct stat st; 2900 if (fstat(fd, &st) < 0) 2901 goto err; 2902 length = st.st_size; 2903 } 2904 seg->length = length; 2905 2906 #if defined(USE_SENDFILE) 2907 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 2908 seg->can_sendfile = 1; 2909 goto done; 2910 } 2911 #endif 2912 2913 if (evbuffer_file_segment_materialize(seg)<0) 2914 goto err; 2915 2916 #if defined(USE_SENDFILE) 2917 done: 2918 #endif 2919 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 2920 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 2921 } 2922 return seg; 2923 err: 2924 mm_free(seg); 2925 return NULL; 2926 } 2927 2928 /* DOCDOC */ 2929 /* Requires lock */ 2930 static int 2931 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) 2932 { 2933 const unsigned flags = seg->flags; 2934 const int fd = seg->fd; 2935 const ev_off_t length = seg->length; 2936 const ev_off_t offset = seg->file_offset; 2937 2938 if (seg->contents) 2939 return 0; /* already materialized */ 2940 2941 #if defined(EVENT__HAVE_MMAP) 2942 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 2943 off_t offset_rounded = 0, offset_leftover = 0; 2944 void *mapped; 2945 if (offset) { 2946 /* mmap implementations don't generally like us 2947 * to have an offset that isn't a round */ 2948 #ifdef SC_PAGE_SIZE 2949 long page_size = sysconf(SC_PAGE_SIZE); 2950 #elif defined(_SC_PAGE_SIZE) 2951 long page_size = sysconf(_SC_PAGE_SIZE); 2952 #else 2953 long page_size = 1; 2954 #endif 2955 if (page_size == -1) 2956 goto err; 2957 offset_leftover = 
offset % page_size; 2958 offset_rounded = offset - offset_leftover; 2959 } 2960 mapped = mmap(NULL, length + offset_leftover, 2961 PROT_READ, 2962 #ifdef MAP_NOCACHE 2963 MAP_NOCACHE | /* ??? */ 2964 #endif 2965 #ifdef MAP_FILE 2966 MAP_FILE | 2967 #endif 2968 MAP_PRIVATE, 2969 fd, offset_rounded); 2970 if (mapped == MAP_FAILED) { 2971 event_warn("%s: mmap(%d, %d, %zu) failed", 2972 __func__, fd, 0, (size_t)(offset + length)); 2973 } else { 2974 seg->mapping = mapped; 2975 seg->contents = (char*)mapped+offset_leftover; 2976 seg->mmap_offset = 0; 2977 seg->is_mapping = 1; 2978 goto done; 2979 } 2980 } 2981 #endif 2982 #ifdef _WIN32 2983 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 2984 long h = (long)_get_osfhandle(fd); 2985 HANDLE m; 2986 ev_uint64_t total_size = length+offset; 2987 if (h == (long)INVALID_HANDLE_VALUE) 2988 goto err; 2989 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, 2990 (total_size >> 32), total_size & 0xfffffffful, 2991 NULL); 2992 if (m != INVALID_HANDLE_VALUE) { /* Does h leak? 
*/ 2993 seg->mapping_handle = m; 2994 seg->mmap_offset = offset; 2995 seg->is_mapping = 1; 2996 goto done; 2997 } 2998 } 2999 #endif 3000 { 3001 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; 3002 ev_off_t read_so_far = 0; 3003 char *mem; 3004 int e; 3005 ev_ssize_t n = 0; 3006 if (!(mem = mm_malloc(length))) 3007 goto err; 3008 if (start_pos < 0) { 3009 mm_free(mem); 3010 goto err; 3011 } 3012 if (lseek(fd, offset, SEEK_SET) < 0) { 3013 mm_free(mem); 3014 goto err; 3015 } 3016 while (read_so_far < length) { 3017 n = read(fd, mem+read_so_far, length-read_so_far); 3018 if (n <= 0) 3019 break; 3020 read_so_far += n; 3021 } 3022 3023 e = errno; 3024 pos = lseek(fd, start_pos, SEEK_SET); 3025 if (n < 0 || (n == 0 && length > read_so_far)) { 3026 mm_free(mem); 3027 errno = e; 3028 goto err; 3029 } else if (pos < 0) { 3030 mm_free(mem); 3031 goto err; 3032 } 3033 3034 seg->contents = mem; 3035 } 3036 3037 done: 3038 return 0; 3039 err: 3040 return -1; 3041 } 3042 3043 void 3044 evbuffer_file_segment_free(struct evbuffer_file_segment *seg) 3045 { 3046 int refcnt; 3047 EVLOCK_LOCK(seg->lock, 0); 3048 refcnt = --seg->refcnt; 3049 EVLOCK_UNLOCK(seg->lock, 0); 3050 if (refcnt > 0) 3051 return; 3052 EVUTIL_ASSERT(refcnt == 0); 3053 3054 if (seg->is_mapping) { 3055 #ifdef _WIN32 3056 CloseHandle(seg->mapping_handle); 3057 #elif defined (EVENT__HAVE_MMAP) 3058 if (munmap(seg->mapping, seg->length) == -1) 3059 event_warn("%s: munmap failed", __func__); 3060 #endif 3061 } else if (seg->contents) { 3062 mm_free(seg->contents); 3063 } 3064 3065 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { 3066 close(seg->fd); 3067 } 3068 3069 EVTHREAD_FREE_LOCK(seg->lock, 0); 3070 mm_free(seg); 3071 } 3072 3073 int 3074 evbuffer_add_file_segment(struct evbuffer *buf, 3075 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3076 { 3077 struct evbuffer_chain *chain; 3078 struct evbuffer_chain_file_segment *extra; 3079 int can_use_sendfile = 0; 3080 3081 
EVBUFFER_LOCK(buf); 3082 EVLOCK_LOCK(seg->lock, 0); 3083 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3084 can_use_sendfile = 1; 3085 } else { 3086 if (!seg->contents) { 3087 if (evbuffer_file_segment_materialize(seg)<0) { 3088 EVLOCK_UNLOCK(seg->lock, 0); 3089 EVBUFFER_UNLOCK(buf); 3090 return -1; 3091 } 3092 } 3093 } 3094 ++seg->refcnt; 3095 EVLOCK_UNLOCK(seg->lock, 0); 3096 3097 if (buf->freeze_end) 3098 goto err; 3099 3100 if (length < 0) { 3101 if (offset > seg->length) 3102 goto err; 3103 length = seg->length - offset; 3104 } 3105 3106 /* Can we actually add this? */ 3107 if (offset+length > seg->length) 3108 goto err; 3109 3110 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3111 if (!chain) 3112 goto err; 3113 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3114 3115 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3116 if (can_use_sendfile && seg->can_sendfile) { 3117 chain->flags |= EVBUFFER_SENDFILE; 3118 chain->misalign = seg->file_offset + offset; 3119 chain->off = length; 3120 chain->buffer_len = chain->misalign + length; 3121 } else if (seg->is_mapping) { 3122 #ifdef _WIN32 3123 ev_uint64_t total_offset = seg->mmap_offset+offset; 3124 ev_uint64_t offset_rounded=0, offset_remaining=0; 3125 LPVOID data; 3126 if (total_offset) { 3127 SYSTEM_INFO si; 3128 memset(&si, 0, sizeof(si)); /* cargo cult */ 3129 GetSystemInfo(&si); 3130 offset_remaining = total_offset % si.dwAllocationGranularity; 3131 offset_rounded = total_offset - offset_remaining; 3132 } 3133 data = MapViewOfFile( 3134 seg->mapping_handle, 3135 FILE_MAP_READ, 3136 offset_rounded >> 32, 3137 offset_rounded & 0xfffffffful, 3138 length + offset_remaining); 3139 if (data == NULL) { 3140 mm_free(chain); 3141 goto err; 3142 } 3143 chain->buffer = (unsigned char*) data; 3144 chain->buffer_len = length+offset_remaining; 3145 chain->misalign = offset_remaining; 3146 chain->off = length; 3147 #else 3148 chain->buffer = (unsigned 
char*)(seg->contents + offset); 3149 chain->buffer_len = length; 3150 chain->off = length; 3151 #endif 3152 } else { 3153 chain->buffer = (unsigned char*)(seg->contents + offset); 3154 chain->buffer_len = length; 3155 chain->off = length; 3156 } 3157 3158 extra->segment = seg; 3159 buf->n_add_for_cb += length; 3160 evbuffer_chain_insert(buf, chain); 3161 3162 evbuffer_invoke_callbacks(buf); 3163 3164 EVBUFFER_UNLOCK(buf); 3165 3166 return 0; 3167 err: 3168 EVBUFFER_UNLOCK(buf); 3169 evbuffer_file_segment_free(seg); 3170 return -1; 3171 } 3172 3173 int 3174 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3175 { 3176 struct evbuffer_file_segment *seg; 3177 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3178 int r; 3179 3180 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3181 if (!seg) 3182 return -1; 3183 r = evbuffer_add_file_segment(buf, seg, 0, length); 3184 evbuffer_file_segment_free(seg); 3185 return r; 3186 } 3187 3188 void 3189 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3190 { 3191 EVBUFFER_LOCK(buffer); 3192 3193 if (!LIST_EMPTY(&buffer->callbacks)) 3194 evbuffer_remove_all_callbacks(buffer); 3195 3196 if (cb) { 3197 struct evbuffer_cb_entry *ent = 3198 evbuffer_add_cb(buffer, NULL, cbarg); 3199 ent->cb.cb_obsolete = cb; 3200 ent->flags |= EVBUFFER_CB_OBSOLETE; 3201 } 3202 EVBUFFER_UNLOCK(buffer); 3203 } 3204 3205 struct evbuffer_cb_entry * 3206 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3207 { 3208 struct evbuffer_cb_entry *e; 3209 if (! 
(e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 3210 return NULL; 3211 EVBUFFER_LOCK(buffer); 3212 e->cb.cb_func = cb; 3213 e->cbarg = cbarg; 3214 e->flags = EVBUFFER_CB_ENABLED; 3215 LIST_INSERT_HEAD(&buffer->callbacks, e, next); 3216 EVBUFFER_UNLOCK(buffer); 3217 return e; 3218 } 3219 3220 int 3221 evbuffer_remove_cb_entry(struct evbuffer *buffer, 3222 struct evbuffer_cb_entry *ent) 3223 { 3224 EVBUFFER_LOCK(buffer); 3225 LIST_REMOVE(ent, next); 3226 EVBUFFER_UNLOCK(buffer); 3227 mm_free(ent); 3228 return 0; 3229 } 3230 3231 int 3232 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3233 { 3234 struct evbuffer_cb_entry *cbent; 3235 int result = -1; 3236 EVBUFFER_LOCK(buffer); 3237 LIST_FOREACH(cbent, &buffer->callbacks, next) { 3238 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 3239 result = evbuffer_remove_cb_entry(buffer, cbent); 3240 goto done; 3241 } 3242 } 3243 done: 3244 EVBUFFER_UNLOCK(buffer); 3245 return result; 3246 } 3247 3248 int 3249 evbuffer_cb_set_flags(struct evbuffer *buffer, 3250 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3251 { 3252 /* the user isn't allowed to mess with these. */ 3253 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3254 EVBUFFER_LOCK(buffer); 3255 cb->flags |= flags; 3256 EVBUFFER_UNLOCK(buffer); 3257 return 0; 3258 } 3259 3260 int 3261 evbuffer_cb_clear_flags(struct evbuffer *buffer, 3262 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3263 { 3264 /* the user isn't allowed to mess with these. 
*/ 3265 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3266 EVBUFFER_LOCK(buffer); 3267 cb->flags &= ~flags; 3268 EVBUFFER_UNLOCK(buffer); 3269 return 0; 3270 } 3271 3272 int 3273 evbuffer_freeze(struct evbuffer *buffer, int start) 3274 { 3275 EVBUFFER_LOCK(buffer); 3276 if (start) 3277 buffer->freeze_start = 1; 3278 else 3279 buffer->freeze_end = 1; 3280 EVBUFFER_UNLOCK(buffer); 3281 return 0; 3282 } 3283 3284 int 3285 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3286 { 3287 EVBUFFER_LOCK(buffer); 3288 if (start) 3289 buffer->freeze_start = 0; 3290 else 3291 buffer->freeze_end = 0; 3292 EVBUFFER_UNLOCK(buffer); 3293 return 0; 3294 } 3295 3296 #if 0 3297 void 3298 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3299 { 3300 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3301 cb->size_before_suspend = evbuffer_get_length(buffer); 3302 cb->flags |= EVBUFFER_CB_SUSPENDED; 3303 } 3304 } 3305 3306 void 3307 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3308 { 3309 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3310 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3311 size_t sz = cb->size_before_suspend; 3312 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3313 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3314 cb->size_before_suspend = 0; 3315 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3316 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3317 } 3318 } 3319 } 3320 #endif 3321 3322