/*
 * Copyright (c) 2002-2007 Niels Provos <[email protected]>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef EVENT__HAVE_VASPRINTF
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
 * stdio.h.  This comes from evconfig-private.h.
 */
#endif
#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef EVENT__HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef EVENT__HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef EVENT__HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef EVENT__HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif
#ifdef EVENT__HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif


#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* send file support */
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	       0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
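/* Illustrative note (editorial sketch, not part of the original source):
 * each chain is a single allocation laid out as
 *
 *     [ evbuffer_chain header | misalign bytes | off bytes of data | free ]
 *                               ^ chain->buffer
 *
 * so CHAIN_SPACE_PTR() points just past the stored data, and
 * CHAIN_SPACE_LEN() is the free tail (0 for immutable chains, which must
 * never be written through).  For example, with buffer_len = 4096,
 * misalign = 10 and off = 100, there are 3986 writable bytes left.
 */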
/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do {			\
	(ptr)->pos = -1;			\
	(ptr)->internal_.chain = NULL;		\
	(ptr)->internal_.pos_in_chain = 0;	\
} while (0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar);
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
		return (NULL);

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	if (size < EVBUFFER_CHAIN_MAX / 2) {
		to_alloc = MIN_BUFFER_SIZE;
		while (to_alloc < size) {
			to_alloc <<= 1;
		}
	} else {
		to_alloc = size;
	}

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);

	chain->refcnt = 1;

	return (chain);
}
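/* Illustrative arithmetic (editorial sketch; assumes MIN_BUFFER_SIZE is,
 * say, 1024): a request for 3000 payload bytes first grows to
 * 3000 + EVBUFFER_CHAIN_SIZE, then rounds up through 1024 -> 2048 -> 4096,
 * so one 4096-byte allocation holds both header and payload, and
 * buffer_len ends up as 4096 - EVBUFFER_CHAIN_SIZE.  Requests at or above
 * EVBUFFER_CHAIN_MAX / 2 skip the rounding and allocate exactly.
 */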
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are empty and not pinned, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}
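/* Illustrative usage sketch (editorial addition, not part of the original
 * file; uses only the public API from event2/buffer.h):
 *
 *     struct evbuffer *buf = evbuffer_new();
 *     if (buf) {
 *         evbuffer_add(buf, "hello", 5);           // append 5 bytes
 *         size_t len = evbuffer_get_length(buf);   // len == 5
 *         evbuffer_free(buf);                      // drops the reference
 *     }
 */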
int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	     cbent != LIST_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
			/* This unlock balances the lock taken by
			 * evbuffer_incref_and_lock_() above; if the callback
			 * was already scheduled, nothing extra was locked,
			 * so there is nothing to unlock. */
			EVBUFFER_UNLOCK(buffer);
		}
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}
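/* Illustrative sketch (editorial addition; 'log_cb' is a hypothetical
 * callback, everything else is public API): deferring callbacks makes the
 * evbuffer batch notifications through an event_base instead of firing
 * them on every mutation.
 *
 *     static void log_cb(struct evbuffer *b,
 *         const struct evbuffer_cb_info *info, void *arg)
 *     {
 *         printf("added %zu, deleted %zu\n", info->n_added, info->n_deleted);
 *     }
 *
 *     evbuffer_add_cb(buf, log_cb, NULL);
 *     evbuffer_defer_callbacks(buf, base);  // run log_cb from base's loop
 */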
static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

size_t
evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
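/* Illustrative sketch (editorial addition; 'fill_from_somewhere' is a
 * hypothetical producer): the reserve/commit pair gives zero-copy writes
 * into the buffer's own memory.
 *
 *     struct evbuffer_iovec v[2];
 *     int n = evbuffer_reserve_space(buf, 4096, v, 2);
 *     if (n > 0) {
 *         size_t produced = fill_from_somewhere(v[0].iov_base,
 *             v[0].iov_len);          // write at most iov_len bytes
 *         v[0].iov_len = produced;    // shrink to what was actually used
 *         evbuffer_commit_space(buf, v, 1);
 *     }
 */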
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it.
		 */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over.
	 */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	struct evbuffer_chain **chp;
	chp = evbuffer_free_trailing_empty_chains(dst);
	*chp = src->first;

	if (src->last_with_datap == &src->first)
		dst->last_with_datap = chp;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference the evbuffer containing the source chain so it
		 * doesn't get released while the chain is still
		 * referenced */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
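/* Illustrative sketch (editorial addition): evbuffer_add_buffer() moves
 * chains rather than copying bytes, so after
 *
 *     evbuffer_add_buffer(dst, src);   // append all of src to dst
 *
 * src is empty and dst has grown by the old evbuffer_get_length(src),
 * typically without any memcpy of the payload.
 */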
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type can not be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
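/* Illustrative note (editorial addition): evbuffer_add_buffer_reference()
 * leaves src intact and appends read-only "multicast" chains to outbuf that
 * share src's memory, so the same payload can be queued to several outputs:
 *
 *     evbuffer_add_buffer_reference(out1, src);
 *     evbuffer_add_buffer_reference(out2, src);   // no payload copies
 *
 * The shared chains become immutable; sendfile and file-segment chains are
 * rejected, as the loop above enforces.
 */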
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		EVUTIL_ASSERT(chain && remaining <= chain->off);
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}
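/* Illustrative sketch (editorial addition; 'parse_len' is a hypothetical
 * parser): evbuffer_copyout() peeks without draining, while
 * evbuffer_remove() copies and drains.
 *
 *     char hdr[4];
 *     if (evbuffer_copyout(buf, hdr, 4) == 4) {    // inspect, keep data
 *         size_t body_len = parse_len(hdr);
 *         if (evbuffer_get_length(buf) >= 4 + body_len)
 *             evbuffer_drain(buf, 4);              // now consume the header
 *     }
 */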
ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
			result = -1;
			goto done;
		}
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}


	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);

		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from the src buffer to the dst buffer, avoiding memcpy where
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
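/* Illustrative sketch (editorial addition): evbuffer_pullup() linearizes
 * the first 'size' bytes, copying across chains only when needed, so
 * parsers can use plain pointer arithmetic:
 *
 *     unsigned char *p = evbuffer_pullup(buf, 8);
 *     if (p) {
 *         // p now points at 8 contiguous readable bytes
 *     }
 *
 * Pulling up the whole buffer (size == -1) defeats the chain representation
 * and can be expensive; prefer small, bounded sizes.
 */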
/*
 * Reads a line terminated by either '\r\n', '\n\r', '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (!chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}


static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t off = it->internal_.pos_in_chain;

	if (chain == NULL)
		return -1;

	return (unsigned char)chain->buffer[chain->misalign + off];
}
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
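/* Illustrative sketch (editorial addition; 'handle_line' is hypothetical):
 * draining lines with a tolerant EOL style.  The returned string is
 * NUL-terminated, the EOL bytes are already drained, and the caller frees
 * the result.
 *
 *     size_t n;
 *     char *line;
 *     while ((line = evbuffer_readln(buf, &n, EVBUFFER_EOL_CRLF)) != NULL) {
 *         handle_line(line, n);
 *         free(line);
 *     }
 */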
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	if (*buf->last_with_datap == NULL) {
		chain = buf->last;
	} else {
		chain = *buf->last_with_datap;
	}

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
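/* Illustrative arithmetic (editorial note on the code above): when a new
 * chain is needed, evbuffer_add() doubles the previous chain's size as long
 * as it is at most EVBUFFER_CHAIN_MAX_AUTO_SIZE/2 (so a 1024-byte chain is
 * followed by a 2048-byte one), but a single large add, say 16384 bytes,
 * still gets one chain sized to hold it in full.
 */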
int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_start) {
		goto done;
	}
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);

		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data. */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	EVUTIL_ASSERT(datlen <= tmp->buffer_len);
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}

/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND ||
	    datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty). */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* can't overflow, since only mutable chains have
			 * huge misaligns. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		EVUTIL_ASSERT(datlen >= avail);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef EVENT__HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ	4096
2201 2202 @param buf The buffer to read into 2203 @param howmuch How much we want to read. 2204 @param vecs An array of two or more iovecs or WSABUFs. 2205 @param n_vecs_avail The length of vecs 2206 @param chainp A pointer to a variable to hold the first chain we're 2207 reading into. 2208 @param exact Boolean: if true, we do not provide more than 'howmuch' 2209 space in the vectors, even if more space is available. 2210 @return The number of buffers we're using. 2211 */ 2212 int 2213 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, 2214 struct evbuffer_iovec *vecs, int n_vecs_avail, 2215 struct evbuffer_chain ***chainp, int exact) 2216 { 2217 struct evbuffer_chain *chain; 2218 struct evbuffer_chain **firstchainp; 2219 size_t so_far; 2220 int i; 2221 ASSERT_EVBUFFER_LOCKED(buf); 2222 2223 if (howmuch < 0) 2224 return -1; 2225 2226 so_far = 0; 2227 /* Let firstchain be the first chain with any space on it */ 2228 firstchainp = buf->last_with_datap; 2229 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 2230 firstchainp = &(*firstchainp)->next; 2231 } 2232 2233 chain = *firstchainp; 2234 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 2235 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 2236 if (avail > (howmuch - so_far) && exact) 2237 avail = howmuch - so_far; 2238 vecs[i].iov_base = CHAIN_SPACE_PTR(chain); 2239 vecs[i].iov_len = avail; 2240 so_far += avail; 2241 chain = chain->next; 2242 } 2243 2244 *chainp = firstchainp; 2245 return i; 2246 } 2247 2248 static int 2249 get_n_bytes_readable_on_socket(evutil_socket_t fd) 2250 { 2251 #if defined(FIONREAD) && defined(_WIN32) 2252 unsigned long lng = EVBUFFER_MAX_READ; 2253 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 2254 return -1; 2255 /* Can overflow, but mostly harmlessly. XXXX */ 2256 return (int)lng; 2257 #elif defined(FIONREAD) 2258 int n = EVBUFFER_MAX_READ; 2259 if (ioctl(fd, FIONREAD, &n) < 0) 2260 return -1; 2261 return n; 2262 #else 2263 return EVBUFFER_MAX_READ; 2264 #endif 2265 } 2266 2267 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 2268 * as howmuch? */ 2269 int 2270 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2271 { 2272 struct evbuffer_chain **chainp; 2273 int n; 2274 int result; 2275 2276 #ifdef USE_IOVEC_IMPL 2277 int nvecs, i, remaining; 2278 #else 2279 struct evbuffer_chain *chain; 2280 unsigned char *p; 2281 #endif 2282 2283 EVBUFFER_LOCK(buf); 2284 2285 if (buf->freeze_end) { 2286 result = -1; 2287 goto done; 2288 } 2289 2290 n = get_n_bytes_readable_on_socket(fd); 2291 if (n <= 0 || n > EVBUFFER_MAX_READ) 2292 n = EVBUFFER_MAX_READ; 2293 if (howmuch < 0 || howmuch > n) 2294 howmuch = n; 2295 2296 #ifdef USE_IOVEC_IMPL 2297 /* Since we can use iovecs, we're willing to use the last 2298 * NUM_READ_IOVEC chains. */ 2299 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2300 result = -1; 2301 goto done; 2302 } else { 2303 IOV_TYPE vecs[NUM_READ_IOVEC]; 2304 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2305 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2306 NUM_READ_IOVEC, &chainp, 1); 2307 #else 2308 /* We aren't using the native struct iovec. Therefore, 2309 we are on win32. 
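		   We fill a temporary array of evbuffer_iovec structures
		   and translate each entry into a WSABUF below before
		   calling WSARecv().  Note that this path only asks
		   evbuffer_read_setup_vecs_() for 2 of the NUM_READ_IOVEC
		   possible vectors.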
*/ 2310 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2311 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2312 &chainp, 1); 2313 2314 for (i=0; i < nvecs; ++i) 2315 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2316 #endif 2317 2318 #ifdef _WIN32 2319 { 2320 DWORD bytesRead; 2321 DWORD flags=0; 2322 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2323 /* The read failed. It might be a close, 2324 * or it might be an error. */ 2325 if (WSAGetLastError() == WSAECONNABORTED) 2326 n = 0; 2327 else 2328 n = -1; 2329 } else 2330 n = bytesRead; 2331 } 2332 #else 2333 n = readv(fd, vecs, nvecs); 2334 #endif 2335 } 2336 2337 #else /*!USE_IOVEC_IMPL*/ 2338 /* If we don't have FIONREAD, we might waste some space here */ 2339 /* XXX we _will_ waste some space here if there is any space left 2340 * over on buf->last. */ 2341 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { 2342 result = -1; 2343 goto done; 2344 } 2345 2346 /* We can append new data at this point */ 2347 p = chain->buffer + chain->misalign + chain->off; 2348 2349 #ifndef _WIN32 2350 n = read(fd, p, howmuch); 2351 #else 2352 n = recv(fd, p, howmuch, 0); 2353 #endif 2354 #endif /* USE_IOVEC_IMPL */ 2355 2356 if (n == -1) { 2357 result = -1; 2358 goto done; 2359 } 2360 if (n == 0) { 2361 result = 0; 2362 goto done; 2363 } 2364 2365 #ifdef USE_IOVEC_IMPL 2366 remaining = n; 2367 for (i=0; i < nvecs; ++i) { 2368 /* can't overflow, since only mutable chains have 2369 * huge misaligns. */ 2370 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); 2371 /* XXXX This is a kludge that can waste space in perverse 2372 * situations. */ 2373 if (space > EVBUFFER_CHAIN_MAX) 2374 space = EVBUFFER_CHAIN_MAX; 2375 if ((ev_ssize_t)space < remaining) { 2376 (*chainp)->off += space; 2377 remaining -= (int)space; 2378 } else { 2379 (*chainp)->off += remaining; 2380 buf->last_with_datap = chainp; 2381 break; 2382 } 2383 chainp = &(*chainp)->next; 2384 } 2385 #else 2386 chain->off += n; 2387 advance_last_with_data(buf); 2388 #endif 2389 buf->total_len += n; 2390 buf->n_add_for_cb += n; 2391 2392 /* Tell someone about changes in this buffer */ 2393 evbuffer_invoke_callbacks_(buf); 2394 result = n; 2395 done: 2396 EVBUFFER_UNLOCK(buf); 2397 return result; 2398 } 2399 2400 #ifdef USE_IOVEC_IMPL 2401 static inline int 2402 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, 2403 ev_ssize_t howmuch) 2404 { 2405 IOV_TYPE iov[NUM_WRITE_IOVEC]; 2406 struct evbuffer_chain *chain = buffer->first; 2407 int n, i = 0; 2408 2409 if (howmuch < 0) 2410 return -1; 2411 2412 ASSERT_EVBUFFER_LOCKED(buffer); 2413 /* XXX make this top out at some maximal data length? if the 2414 * buffer has (say) 1MB in it, split over 128 chains, there's 2415 * no way it all gets written in one go. */ 2416 while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { 2417 #ifdef USE_SENDFILE 2418 /* we cannot write the file info via writev */ 2419 if (chain->flags & EVBUFFER_SENDFILE) 2420 break; 2421 #endif 2422 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); 2423 if ((size_t)howmuch >= chain->off) { 2424 /* XXXcould be problematic when windows supports mmap*/ 2425 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; 2426 howmuch -= chain->off; 2427 } else { 2428 /* XXXcould be problematic when windows supports mmap*/ 2429 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; 2430 break; 2431 } 2432 chain = chain->next; 2433 } 2434 if (! 
i) 2435 return 0; 2436 2437 #ifdef _WIN32 2438 { 2439 DWORD bytesSent; 2440 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) 2441 n = -1; 2442 else 2443 n = bytesSent; 2444 } 2445 #else 2446 n = writev(fd, iov, i); 2447 #endif 2448 return (n); 2449 } 2450 #endif 2451 2452 #ifdef USE_SENDFILE 2453 static inline int 2454 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, 2455 ev_ssize_t howmuch) 2456 { 2457 struct evbuffer_chain *chain = buffer->first; 2458 struct evbuffer_chain_file_segment *info = 2459 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, 2460 chain); 2461 const int source_fd = info->segment->fd; 2462 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) 2463 int res; 2464 ev_off_t len = chain->off; 2465 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) 2466 ev_ssize_t res; 2467 ev_off_t offset = chain->misalign; 2468 #endif 2469 2470 ASSERT_EVBUFFER_LOCKED(buffer); 2471 2472 #if defined(SENDFILE_IS_MACOSX) 2473 res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); 2474 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2475 return (-1); 2476 2477 return (len); 2478 #elif defined(SENDFILE_IS_FREEBSD) 2479 res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); 2480 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2481 return (-1); 2482 2483 return (len); 2484 #elif defined(SENDFILE_IS_LINUX) 2485 /* TODO(niels): implement splice */ 2486 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2487 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2488 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ 2489 return (0); 2490 } 2491 return (res); 2492 #elif defined(SENDFILE_IS_SOLARIS) 2493 { 2494 const off_t offset_orig = offset; 2495 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2496 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2497 if (offset - offset_orig) 2498 return offset - offset_orig; 2499 /* if this is EAGAIN or EINTR and no bytes were 2500 * written, return 0 */ 2501 return (0); 2502 } 2503 return (res); 2504 } 2505 #endif 2506 } 2507 #endif 2508 2509 int 2510 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, 2511 ev_ssize_t howmuch) 2512 { 2513 int n = -1; 2514 2515 EVBUFFER_LOCK(buffer); 2516 2517 if (buffer->freeze_start) { 2518 goto done; 2519 } 2520 2521 if (howmuch < 0 || (size_t)howmuch > buffer->total_len) 2522 howmuch = buffer->total_len; 2523 2524 if (howmuch > 0) { 2525 #ifdef USE_SENDFILE 2526 struct evbuffer_chain *chain = buffer->first; 2527 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) 2528 n = evbuffer_write_sendfile(buffer, fd, howmuch); 2529 else { 2530 #endif 2531 #ifdef USE_IOVEC_IMPL 2532 n = evbuffer_write_iovec(buffer, fd, howmuch); 2533 #elif defined(_WIN32) 2534 /* XXX(nickm) Don't disable this code until we know if 2535 * the WSARecv code above works. 
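	 * The fallback here linearizes the first howmuch bytes with
	 * evbuffer_pullup(), which may have to copy them all into a
	 * single chain; the iovec-based path avoids that copy.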
 */
2536 		void *p = evbuffer_pullup(buffer, howmuch);
2537 		EVUTIL_ASSERT(p || !howmuch);
2538 		n = send(fd, p, howmuch, 0);
2539 #else
2540 		void *p = evbuffer_pullup(buffer, howmuch);
2541 		EVUTIL_ASSERT(p || !howmuch);
2542 		n = write(fd, p, howmuch);
2543 #endif
2544 #ifdef USE_SENDFILE
2545 	}
2546 #endif
2547 	}
2548 
2549 	if (n > 0)
2550 		evbuffer_drain(buffer, n);
2551 
2552 done:
2553 	EVBUFFER_UNLOCK(buffer);
2554 	return (n);
2555 }
2556 
2557 int
2558 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2559 {
2560 	return evbuffer_write_atmost(buffer, fd, -1);
2561 }
2562 
2563 unsigned char *
2564 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2565 {
2566 	unsigned char *search;
2567 	struct evbuffer_ptr ptr;
2568 
2569 	EVBUFFER_LOCK(buffer);
2570 
2571 	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2572 	if (ptr.pos < 0) {
2573 		search = NULL;
2574 	} else {
2575 		search = evbuffer_pullup(buffer, ptr.pos + len);
2576 		if (search)
2577 			search += ptr.pos;
2578 	}
2579 	EVBUFFER_UNLOCK(buffer);
2580 	return search;
2581 }
2582 
2583 /* Subtract <b>howfar</b> from the position of <b>pos</b> within
2584  * <b>buf</b>. Returns 0 on success, -1 on failure.
2585  *
2586  * This isn't exposed yet, because of potential inefficiency issues.
2587  * Maybe it should be. */
2588 static int
2589 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
2590     size_t howfar)
2591 {
2592 	if (pos->pos < 0)
2593 		return -1;
2594 	if (howfar > (size_t)pos->pos)
2595 		return -1;
2596 	if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
2597 		pos->internal_.pos_in_chain -= howfar;
2598 		pos->pos -= howfar;
2599 		return 0;
2600 	} else {
2601 		const size_t newpos = pos->pos - howfar;
2602 		/* Here's the inefficient part: it walks over the
2603 		 * chains until we hit newpos. */
2604 		return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
2605 	}
2606 }
2607 
2608 int
2609 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
2610     size_t position, enum evbuffer_ptr_how how)
2611 {
2612 	size_t left = position;
2613 	struct evbuffer_chain *chain = NULL;
2614 	int result = 0;
2615 
2616 	EVBUFFER_LOCK(buf);
2617 
2618 	switch (how) {
2619 	case EVBUFFER_PTR_SET:
2620 		chain = buf->first;
2621 		pos->pos = position;
2622 		position = 0;
2623 		break;
2624 	case EVBUFFER_PTR_ADD:
2625 		/* this avoids iterating over all previous chains if
2626 		   we just want to advance the position */
2627 		if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
2628 			EVBUFFER_UNLOCK(buf);
2629 			return -1;
2630 		}
2631 		chain = pos->internal_.chain;
2632 		pos->pos += position;
2633 		position = pos->internal_.pos_in_chain;
2634 		break;
2635 	}
2636 
2637 	EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
2638 	while (chain && position + left >= chain->off) {
2639 		left -= chain->off - position;
2640 		chain = chain->next;
2641 		position = 0;
2642 	}
2643 	if (chain) {
2644 		pos->internal_.chain = chain;
2645 		pos->internal_.pos_in_chain = position + left;
2646 	} else if (left == 0) {
2647 		/* The first byte in the (nonexistent) chain after the last chain */
2648 		pos->internal_.chain = NULL;
2649 		pos->internal_.pos_in_chain = 0;
2650 	} else {
2651 		PTR_NOT_FOUND(pos);
2652 		result = -1;
2653 	}
2654 
2655 	EVBUFFER_UNLOCK(buf);
2656 
2657 	return result;
2658 }
2659 
2660 /**
2661    Compare the bytes in buf at position pos to the len bytes in mem.  Return
2662    less than 0, 0, or greater than 0, as memcmp does.
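
   For illustration only, a sketch of a typical call; it assumes pos came
   from evbuffer_search() or evbuffer_ptr_set() on buf, and that the
   buffer's lock is held:

	if (evbuffer_ptr_memcmp(buf, &pos, "GET ", 4) == 0) {
		// The four bytes at pos match, even when they span
		// a chain boundary.
	}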
2663 */ 2664 static int 2665 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, 2666 const char *mem, size_t len) 2667 { 2668 struct evbuffer_chain *chain; 2669 size_t position; 2670 int r; 2671 2672 ASSERT_EVBUFFER_LOCKED(buf); 2673 2674 if (pos->pos < 0 || 2675 EV_SIZE_MAX - len < (size_t)pos->pos || 2676 pos->pos + len > buf->total_len) 2677 return -1; 2678 2679 chain = pos->internal_.chain; 2680 position = pos->internal_.pos_in_chain; 2681 while (len && chain) { 2682 size_t n_comparable; 2683 if (len + position > chain->off) 2684 n_comparable = chain->off - position; 2685 else 2686 n_comparable = len; 2687 r = memcmp(chain->buffer + chain->misalign + position, mem, 2688 n_comparable); 2689 if (r) 2690 return r; 2691 mem += n_comparable; 2692 len -= n_comparable; 2693 position = 0; 2694 chain = chain->next; 2695 } 2696 2697 return 0; 2698 } 2699 2700 struct evbuffer_ptr 2701 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) 2702 { 2703 return evbuffer_search_range(buffer, what, len, start, NULL); 2704 } 2705 2706 struct evbuffer_ptr 2707 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) 2708 { 2709 struct evbuffer_ptr pos; 2710 struct evbuffer_chain *chain, *last_chain = NULL; 2711 const unsigned char *p; 2712 char first; 2713 2714 EVBUFFER_LOCK(buffer); 2715 2716 if (start) { 2717 memcpy(&pos, start, sizeof(pos)); 2718 chain = pos.internal_.chain; 2719 } else { 2720 pos.pos = 0; 2721 chain = pos.internal_.chain = buffer->first; 2722 pos.internal_.pos_in_chain = 0; 2723 } 2724 2725 if (end) 2726 last_chain = end->internal_.chain; 2727 2728 if (!len || len > EV_SSIZE_MAX) 2729 goto done; 2730 2731 first = what[0]; 2732 2733 while (chain) { 2734 const unsigned char *start_at = 2735 chain->buffer + chain->misalign + 2736 pos.internal_.pos_in_chain; 2737 p = memchr(start_at, first, 2738 chain->off - pos.internal_.pos_in_chain); 2739 if (p) { 2740 pos.pos += p - start_at; 2741 pos.internal_.pos_in_chain += p - start_at; 2742 if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { 2743 if (end && pos.pos + (ev_ssize_t)len > end->pos) 2744 goto not_found; 2745 else 2746 goto done; 2747 } 2748 ++pos.pos; 2749 ++pos.internal_.pos_in_chain; 2750 if (pos.internal_.pos_in_chain == chain->off) { 2751 chain = pos.internal_.chain = chain->next; 2752 pos.internal_.pos_in_chain = 0; 2753 } 2754 } else { 2755 if (chain == last_chain) 2756 goto not_found; 2757 pos.pos += chain->off - pos.internal_.pos_in_chain; 2758 chain = pos.internal_.chain = chain->next; 2759 pos.internal_.pos_in_chain = 0; 2760 } 2761 } 2762 2763 not_found: 2764 PTR_NOT_FOUND(&pos); 2765 done: 2766 EVBUFFER_UNLOCK(buffer); 2767 return pos; 2768 } 2769 2770 int 2771 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, 2772 struct evbuffer_ptr *start_at, 2773 struct evbuffer_iovec *vec, int n_vec) 2774 { 2775 struct evbuffer_chain *chain; 2776 int idx = 0; 2777 ev_ssize_t len_so_far = 0; 2778 2779 /* Avoid locking in trivial edge cases */ 2780 if (start_at && start_at->internal_.chain == NULL) 2781 return 0; 2782 2783 EVBUFFER_LOCK(buffer); 2784 2785 if (start_at) { 2786 chain = start_at->internal_.chain; 2787 len_so_far = chain->off 2788 - start_at->internal_.pos_in_chain; 2789 idx = 1; 2790 if (n_vec > 0) { 2791 vec[0].iov_base = chain->buffer + chain->misalign 2792 + start_at->internal_.pos_in_chain; 2793 vec[0].iov_len = len_so_far; 2794 } 2795 chain = 
chain->next; 2796 } else { 2797 chain = buffer->first; 2798 } 2799 2800 if (n_vec == 0 && len < 0) { 2801 /* If no vectors are provided and they asked for "everything", 2802 * pretend they asked for the actual available amount. */ 2803 len = buffer->total_len; 2804 if (start_at) { 2805 len -= start_at->pos; 2806 } 2807 } 2808 2809 while (chain) { 2810 if (len >= 0 && len_so_far >= len) 2811 break; 2812 if (idx<n_vec) { 2813 vec[idx].iov_base = chain->buffer + chain->misalign; 2814 vec[idx].iov_len = chain->off; 2815 } else if (len<0) { 2816 break; 2817 } 2818 ++idx; 2819 len_so_far += chain->off; 2820 chain = chain->next; 2821 } 2822 2823 EVBUFFER_UNLOCK(buffer); 2824 2825 return idx; 2826 } 2827 2828 2829 int 2830 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) 2831 { 2832 char *buffer; 2833 size_t space; 2834 int sz, result = -1; 2835 va_list aq; 2836 struct evbuffer_chain *chain; 2837 2838 2839 EVBUFFER_LOCK(buf); 2840 2841 if (buf->freeze_end) { 2842 goto done; 2843 } 2844 2845 /* make sure that at least some space is available */ 2846 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) 2847 goto done; 2848 2849 for (;;) { 2850 #if 0 2851 size_t used = chain->misalign + chain->off; 2852 buffer = (char *)chain->buffer + chain->misalign + chain->off; 2853 EVUTIL_ASSERT(chain->buffer_len >= used); 2854 space = chain->buffer_len - used; 2855 #endif 2856 buffer = (char*) CHAIN_SPACE_PTR(chain); 2857 space = (size_t) CHAIN_SPACE_LEN(chain); 2858 2859 #ifndef va_copy 2860 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) 2861 #endif 2862 va_copy(aq, ap); 2863 2864 sz = evutil_vsnprintf(buffer, space, fmt, aq); 2865 2866 va_end(aq); 2867 2868 if (sz < 0) 2869 goto done; 2870 if (INT_MAX >= EVBUFFER_CHAIN_MAX && 2871 (size_t)sz >= EVBUFFER_CHAIN_MAX) 2872 goto done; 2873 if ((size_t)sz < space) { 2874 chain->off += sz; 2875 buf->total_len += sz; 2876 buf->n_add_for_cb += sz; 2877 2878 advance_last_with_data(buf); 2879 evbuffer_invoke_callbacks_(buf); 2880 result = sz; 2881 goto done; 2882 } 2883 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) 2884 goto done; 2885 } 2886 /* NOTREACHED */ 2887 2888 done: 2889 EVBUFFER_UNLOCK(buf); 2890 return result; 2891 } 2892 2893 int 2894 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) 
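/* Append a printf-style formatted string to buf, growing the buffer as
 * needed.  Returns the number of bytes added, or -1 on failure.  A small
 * illustrative call (the format string and arguments here are arbitrary):
 *
 *	evbuffer_add_printf(buf, "%s: %d\r\n", "Content-Length", 42);
 */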
2895 {
2896 	int res = -1;
2897 	va_list ap;
2898 
2899 	va_start(ap, fmt);
2900 	res = evbuffer_add_vprintf(buf, fmt, ap);
2901 	va_end(ap);
2902 
2903 	return (res);
2904 }
2905 
2906 int
2907 evbuffer_add_reference(struct evbuffer *outbuf,
2908     const void *data, size_t datlen,
2909     evbuffer_ref_cleanup_cb cleanupfn, void *extra)
2910 {
2911 	struct evbuffer_chain *chain;
2912 	struct evbuffer_chain_reference *info;
2913 	int result = -1;
2914 
2915 	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
2916 	if (!chain)
2917 		return (-1);
2918 	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
2919 	chain->buffer = (unsigned char *)data;
2920 	chain->buffer_len = datlen;
2921 	chain->off = datlen;
2922 
2923 	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
2924 	info->cleanupfn = cleanupfn;
2925 	info->extra = extra;
2926 
2927 	EVBUFFER_LOCK(outbuf);
2928 	if (outbuf->freeze_end) {
2929 		/* don't call chain_free; we do not want to actually invoke
2930 		 * the cleanup function */
2931 		mm_free(chain);
2932 		goto done;
2933 	}
2934 	evbuffer_chain_insert(outbuf, chain);
2935 	outbuf->n_add_for_cb += datlen;
2936 
2937 	evbuffer_invoke_callbacks_(outbuf);
2938 
2939 	result = 0;
2940 done:
2941 	EVBUFFER_UNLOCK(outbuf);
2942 
2943 	return result;
2944 }
2945 
2946 /* TODO(niels): we may want to automagically convert to mmap, in
2947  * case evbuffer_remove() or evbuffer_pullup() are being used.
2948  */
2949 struct evbuffer_file_segment *
2950 evbuffer_file_segment_new(
2951 	int fd, ev_off_t offset, ev_off_t length, unsigned flags)
2952 {
2953 	struct evbuffer_file_segment *seg =
2954 	    mm_calloc(sizeof(struct evbuffer_file_segment), 1);
2955 	if (!seg)
2956 		return NULL;
2957 	seg->refcnt = 1;
2958 	seg->fd = fd;
2959 	seg->flags = flags;
2960 	seg->file_offset = offset;
2961 	seg->cleanup_cb = NULL;
2962 	seg->cleanup_cb_arg = NULL;
2963 #ifdef _WIN32
2964 #ifndef lseek
2965 #define lseek _lseeki64
2966 #endif
2967 #ifndef fstat
2968 #define fstat _fstat
2969 #endif
2970 #ifndef stat
2971 #define stat _stat
2972 #endif
2973 #endif
2974 	if (length == -1) {
2975 		struct stat st;
2976 		if (fstat(fd, &st) < 0)
2977 			goto err;
2978 		length = st.st_size;
2979 	}
2980 	seg->length = length;
2981 
2982 	if (offset < 0 || length < 0 ||
2983 	    ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
2984 	    (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
2985 		goto err;
2986 
2987 #if defined(USE_SENDFILE)
2988 	if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
2989 		seg->can_sendfile = 1;
2990 		goto done;
2991 	}
2992 #endif
2993 
2994 	if (evbuffer_file_segment_materialize(seg) < 0)
2995 		goto err;
2996 
2997 #if defined(USE_SENDFILE)
2998 done:
2999 #endif
3000 	if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
3001 		EVTHREAD_ALLOC_LOCK(seg->lock, 0);
3002 	}
3003 	return seg;
3004 err:
3005 	mm_free(seg);
3006 	return NULL;
3007 }
3008 
3009 #ifdef EVENT__HAVE_MMAP
3010 static long
3011 get_page_size(void)
3012 {
3013 #ifdef SC_PAGE_SIZE
3014 	return sysconf(SC_PAGE_SIZE);
3015 #elif defined(_SC_PAGE_SIZE)
3016 	return sysconf(_SC_PAGE_SIZE);
3017 #else
3018 	return 1;
3019 #endif
3020 }
3021 #endif
3022 
3023 /* Make sure a file segment's contents are in memory: mmap the file if we
3024  * can, or else read it into an allocated buffer.  Requires lock. */
3025 static int
3026 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
3027 {
3028 	const unsigned flags = seg->flags;
3029 	const int fd = seg->fd;
3030 	const ev_off_t length = seg->length;
3031 	const ev_off_t offset = seg->file_offset;
3032 
3033 	if (seg->contents)
3034 		return 0; /* already materialized */
3035 
3036 #if defined(EVENT__HAVE_MMAP)
3037 	if (!(flags &
EVBUF_FS_DISABLE_MMAP)) {
3038 		off_t offset_rounded = 0, offset_leftover = 0;
3039 		void *mapped;
3040 		if (offset) {
3041 			/* mmap implementations don't generally like us to
3042 			 * have an offset that isn't a round multiple of the page size. */
3043 			long page_size = get_page_size();
3044 			if (page_size == -1)
3045 				goto err;
3046 			offset_leftover = offset % page_size;
3047 			offset_rounded = offset - offset_leftover;
3048 		}
3049 		mapped = mmap(NULL, length + offset_leftover,
3050 		    PROT_READ,
3051 #ifdef MAP_NOCACHE
3052 		    MAP_NOCACHE | /* ??? */
3053 #endif
3054 #ifdef MAP_FILE
3055 		    MAP_FILE |
3056 #endif
3057 		    MAP_PRIVATE,
3058 		    fd, offset_rounded);
3059 		if (mapped == MAP_FAILED) {
3060 			event_warn("%s: mmap(%d, %d, %zu) failed",
3061 			    __func__, fd, 0, (size_t)(offset + length));
3062 		} else {
3063 			seg->mapping = mapped;
3064 			seg->contents = (char*)mapped+offset_leftover;
3065 			seg->mmap_offset = 0;
3066 			seg->is_mapping = 1;
3067 			goto done;
3068 		}
3069 	}
3070 #endif
3071 #ifdef _WIN32
3072 	if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
3073 		intptr_t h = _get_osfhandle(fd);
3074 		HANDLE m;
3075 		ev_uint64_t total_size = length+offset;
3076 		if ((HANDLE)h == INVALID_HANDLE_VALUE)
3077 			goto err;
3078 		m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
3079 		    (total_size >> 32), total_size & 0xfffffffful,
3080 		    NULL);
3081 		if (m != NULL) { /* CreateFileMapping returns NULL on failure.  Does h leak? */
3082 			seg->mapping_handle = m;
3083 			seg->mmap_offset = offset;
3084 			seg->is_mapping = 1;
3085 			goto done;
3086 		}
3087 	}
3088 #endif
3089 	{
3090 		ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
3091 		ev_off_t read_so_far = 0;
3092 		char *mem;
3093 		int e;
3094 		ev_ssize_t n = 0;
3095 		if (!(mem = mm_malloc(length)))
3096 			goto err;
3097 		if (start_pos < 0) {
3098 			mm_free(mem);
3099 			goto err;
3100 		}
3101 		if (lseek(fd, offset, SEEK_SET) < 0) {
3102 			mm_free(mem);
3103 			goto err;
3104 		}
3105 		while (read_so_far < length) {
3106 			n = read(fd, mem+read_so_far, length-read_so_far);
3107 			if (n <= 0)
3108 				break;
3109 			read_so_far += n;
3110 		}
3111 
3112 		e = errno;
3113 		pos = lseek(fd, start_pos, SEEK_SET);
3114 		if (n < 0 || (n == 0 && length > read_so_far)) {
3115 			mm_free(mem);
3116 			errno = e;
3117 			goto err;
3118 		} else if (pos < 0) {
3119 			mm_free(mem);
3120 			goto err;
3121 		}
3122 
3123 		seg->contents = mem;
3124 	}
3125 
3126 done:
3127 	return 0;
3128 err:
3129 	return -1;
3130 }
3131 
3132 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
3133 	evbuffer_file_segment_cleanup_cb cb, void* arg)
3134 {
3135 	EVUTIL_ASSERT(seg->refcnt > 0);
3136 	seg->cleanup_cb = cb;
3137 	seg->cleanup_cb_arg = arg;
3138 }
3139 
3140 void
3141 evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
3142 {
3143 	int refcnt;
3144 	EVLOCK_LOCK(seg->lock, 0);
3145 	refcnt = --seg->refcnt;
3146 	EVLOCK_UNLOCK(seg->lock, 0);
3147 	if (refcnt > 0)
3148 		return;
3149 	EVUTIL_ASSERT(refcnt == 0);
3150 
3151 	if (seg->is_mapping) {
3152 #ifdef _WIN32
3153 		CloseHandle(seg->mapping_handle);
3154 #elif defined (EVENT__HAVE_MMAP)
3155 		off_t offset_leftover;
3156 		offset_leftover = seg->file_offset % get_page_size();
3157 		if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
3158 			event_warn("%s: munmap failed", __func__);
3159 #endif
3160 	} else if (seg->contents) {
3161 		mm_free(seg->contents);
3162 	}
3163 
3164 	if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
3165 		close(seg->fd);
3166 	}
3167 
3168 	if (seg->cleanup_cb) {
3169 		(*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
3170 		    seg->flags, seg->cleanup_cb_arg);
3171 		seg->cleanup_cb = NULL;
3172 		seg->cleanup_cb_arg = NULL;
3173 	}
3174 
3175 
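	/* No references remain: free the lock allocated in
	 * evbuffer_file_segment_new(), if any, and the segment itself. */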
EVTHREAD_FREE_LOCK(seg->lock, 0); 3176 mm_free(seg); 3177 } 3178 3179 int 3180 evbuffer_add_file_segment(struct evbuffer *buf, 3181 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3182 { 3183 struct evbuffer_chain *chain; 3184 struct evbuffer_chain_file_segment *extra; 3185 int can_use_sendfile = 0; 3186 3187 EVBUFFER_LOCK(buf); 3188 EVLOCK_LOCK(seg->lock, 0); 3189 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3190 can_use_sendfile = 1; 3191 } else { 3192 if (!seg->contents) { 3193 if (evbuffer_file_segment_materialize(seg)<0) { 3194 EVLOCK_UNLOCK(seg->lock, 0); 3195 EVBUFFER_UNLOCK(buf); 3196 return -1; 3197 } 3198 } 3199 } 3200 ++seg->refcnt; 3201 EVLOCK_UNLOCK(seg->lock, 0); 3202 3203 if (buf->freeze_end) 3204 goto err; 3205 3206 if (length < 0) { 3207 if (offset > seg->length) 3208 goto err; 3209 length = seg->length - offset; 3210 } 3211 3212 /* Can we actually add this? */ 3213 if (offset+length > seg->length) 3214 goto err; 3215 3216 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3217 if (!chain) 3218 goto err; 3219 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3220 3221 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3222 if (can_use_sendfile && seg->can_sendfile) { 3223 chain->flags |= EVBUFFER_SENDFILE; 3224 chain->misalign = seg->file_offset + offset; 3225 chain->off = length; 3226 chain->buffer_len = chain->misalign + length; 3227 } else if (seg->is_mapping) { 3228 #ifdef _WIN32 3229 ev_uint64_t total_offset = seg->mmap_offset+offset; 3230 ev_uint64_t offset_rounded=0, offset_remaining=0; 3231 LPVOID data; 3232 if (total_offset) { 3233 SYSTEM_INFO si; 3234 memset(&si, 0, sizeof(si)); /* cargo cult */ 3235 GetSystemInfo(&si); 3236 offset_remaining = total_offset % si.dwAllocationGranularity; 3237 offset_rounded = total_offset - offset_remaining; 3238 } 3239 data = MapViewOfFile( 3240 seg->mapping_handle, 3241 FILE_MAP_READ, 3242 offset_rounded >> 32, 3243 offset_rounded & 0xfffffffful, 3244 length + offset_remaining); 3245 if (data == NULL) { 3246 mm_free(chain); 3247 goto err; 3248 } 3249 chain->buffer = (unsigned char*) data; 3250 chain->buffer_len = length+offset_remaining; 3251 chain->misalign = offset_remaining; 3252 chain->off = length; 3253 #else 3254 chain->buffer = (unsigned char*)(seg->contents + offset); 3255 chain->buffer_len = length; 3256 chain->off = length; 3257 #endif 3258 } else { 3259 chain->buffer = (unsigned char*)(seg->contents + offset); 3260 chain->buffer_len = length; 3261 chain->off = length; 3262 } 3263 3264 extra->segment = seg; 3265 buf->n_add_for_cb += length; 3266 evbuffer_chain_insert(buf, chain); 3267 3268 evbuffer_invoke_callbacks_(buf); 3269 3270 EVBUFFER_UNLOCK(buf); 3271 3272 return 0; 3273 err: 3274 EVBUFFER_UNLOCK(buf); 3275 evbuffer_file_segment_free(seg); /* Lowers the refcount */ 3276 return -1; 3277 } 3278 3279 int 3280 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3281 { 3282 struct evbuffer_file_segment *seg; 3283 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3284 int r; 3285 3286 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3287 if (!seg) 3288 return -1; 3289 r = evbuffer_add_file_segment(buf, seg, 0, length); 3290 if (r == 0) 3291 evbuffer_file_segment_free(seg); 3292 return r; 3293 } 3294 3295 void 3296 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3297 { 3298 EVBUFFER_LOCK(buffer); 3299 3300 if (!LIST_EMPTY(&buffer->callbacks)) 3301 evbuffer_remove_all_callbacks(buffer); 3302 3303 
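	/* The obsolete evbuffer_setcb() interface supports only a single
	 * callback, so we store cb as an ordinary callback entry flagged
	 * EVBUFFER_CB_OBSOLETE; passing a NULL cb just leaves the buffer
	 * with no callbacks at all. */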
if (cb) {
3304 		struct evbuffer_cb_entry *ent =
3305 		    evbuffer_add_cb(buffer, NULL, cbarg);
		if (ent) {	/* evbuffer_add_cb() returns NULL on OOM */
3306 			ent->cb.cb_obsolete = cb;
3307 			ent->flags |= EVBUFFER_CB_OBSOLETE;
		}
3308 	}
3309 	EVBUFFER_UNLOCK(buffer);
3310 }
3311 
3312 struct evbuffer_cb_entry *
3313 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3314 {
3315 	struct evbuffer_cb_entry *e;
3316 	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
3317 		return NULL;
3318 	EVBUFFER_LOCK(buffer);
3319 	e->cb.cb_func = cb;
3320 	e->cbarg = cbarg;
3321 	e->flags = EVBUFFER_CB_ENABLED;
3322 	LIST_INSERT_HEAD(&buffer->callbacks, e, next);
3323 	EVBUFFER_UNLOCK(buffer);
3324 	return e;
3325 }
3326 
3327 int
3328 evbuffer_remove_cb_entry(struct evbuffer *buffer,
3329 			 struct evbuffer_cb_entry *ent)
3330 {
3331 	EVBUFFER_LOCK(buffer);
3332 	LIST_REMOVE(ent, next);
3333 	EVBUFFER_UNLOCK(buffer);
3334 	mm_free(ent);
3335 	return 0;
3336 }
3337 
3338 int
3339 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3340 {
3341 	struct evbuffer_cb_entry *cbent;
3342 	int result = -1;
3343 	EVBUFFER_LOCK(buffer);
3344 	LIST_FOREACH(cbent, &buffer->callbacks, next) {
3345 		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
3346 			result = evbuffer_remove_cb_entry(buffer, cbent);
3347 			goto done;
3348 		}
3349 	}
3350 done:
3351 	EVBUFFER_UNLOCK(buffer);
3352 	return result;
3353 }
3354 
3355 int
3356 evbuffer_cb_set_flags(struct evbuffer *buffer,
3357 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3358 {
3359 	/* the user isn't allowed to mess with these. */
3360 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3361 	EVBUFFER_LOCK(buffer);
3362 	cb->flags |= flags;
3363 	EVBUFFER_UNLOCK(buffer);
3364 	return 0;
3365 }
3366 
3367 int
3368 evbuffer_cb_clear_flags(struct evbuffer *buffer,
3369 		       struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3370 {
3371 	/* the user isn't allowed to mess with these.
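	 * Bits in EVBUFFER_CB_INTERNAL_FLAGS are reserved for internal
	 * bookkeeping, so we mask them out of the caller's request.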
*/ 3372 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3373 EVBUFFER_LOCK(buffer); 3374 cb->flags &= ~flags; 3375 EVBUFFER_UNLOCK(buffer); 3376 return 0; 3377 } 3378 3379 int 3380 evbuffer_freeze(struct evbuffer *buffer, int start) 3381 { 3382 EVBUFFER_LOCK(buffer); 3383 if (start) 3384 buffer->freeze_start = 1; 3385 else 3386 buffer->freeze_end = 1; 3387 EVBUFFER_UNLOCK(buffer); 3388 return 0; 3389 } 3390 3391 int 3392 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3393 { 3394 EVBUFFER_LOCK(buffer); 3395 if (start) 3396 buffer->freeze_start = 0; 3397 else 3398 buffer->freeze_end = 0; 3399 EVBUFFER_UNLOCK(buffer); 3400 return 0; 3401 } 3402 3403 #if 0 3404 void 3405 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3406 { 3407 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3408 cb->size_before_suspend = evbuffer_get_length(buffer); 3409 cb->flags |= EVBUFFER_CB_SUSPENDED; 3410 } 3411 } 3412 3413 void 3414 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3415 { 3416 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3417 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3418 size_t sz = cb->size_before_suspend; 3419 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3420 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3421 cb->size_before_suspend = 0; 3422 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3423 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3424 } 3425 } 3426 } 3427 #endif 3428 3429 int 3430 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, 3431 int max_cbs) 3432 { 3433 int r = 0; 3434 EVBUFFER_LOCK(buffer); 3435 if (buffer->deferred_cbs) { 3436 if (max_cbs < 1) { 3437 r = -1; 3438 goto done; 3439 } 3440 cbs[0] = &buffer->deferred; 3441 r = 1; 3442 } 3443 done: 3444 EVBUFFER_UNLOCK(buffer); 3445 return r; 3446 } 3447