/*
 * Copyright (c) 2002-2007 Niels Provos <[email protected]>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef EVENT__HAVE_VASPRINTF
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
 * stdio.h.  This comes from evconfig-private.h.
 */
#endif

#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef EVENT__HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef EVENT__HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef EVENT__HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef EVENT__HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif
#ifdef EVENT__HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif


#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"
#include "event-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED	((void *)-1)
#endif

/* send file support */
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	    0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
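/* Worked example (editorial note, not upstream documentation): each chain's
 * storage is laid out as [misalign bytes][off bytes of data][free space].
 * With buffer_len == 4096, misalign == 10, and off == 100,
 * CHAIN_SPACE_PTR(ch) points at buffer + 110 and CHAIN_SPACE_LEN(ch) is
 * 4096 - 110 = 3986 writable bytes; for an EVBUFFER_IMMUTABLE chain the
 * writable length is always reported as 0. */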
#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do {			\
	(ptr)->pos = -1;			\
	(ptr)->internal_.chain = NULL;		\
	(ptr)->internal_.pos_in_chain = 0;	\
} while (0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar);
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
		return (NULL);

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	if (size < EVBUFFER_CHAIN_MAX / 2) {
		to_alloc = MIN_BUFFER_SIZE;
		while (to_alloc < size) {
			to_alloc <<= 1;
		}
	} else {
		to_alloc = size;
	}

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);

	chain->refcnt = 1;

	return (chain);
}
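/* Worked example (editorial note; assumes the 64-bit default of
 * MIN_BUFFER_SIZE == 1024 and a rounded header size EVBUFFER_CHAIN_SIZE of
 * roughly 96 bytes): a request for 1000 payload bytes becomes ~1096 bytes
 * total, so to_alloc doubles from 1024 to 2048 and the chain ends up with
 * 2048 - EVBUFFER_CHAIN_SIZE bytes of buffer_len. Only requests of at least
 * EVBUFFER_CHAIN_MAX / 2 bytes skip the power-of-two rounding. */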
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are empty and not pinned, prior to
 * replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}

/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}
void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}
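/* Illustrative sketch (editorial note, not part of the library): one way a
 * caller might combine the two functions above for a buffer shared between
 * threads. 'base' is a hypothetical, already-initialized event_base, and
 * threading support (e.g. evthread_use_pthreads()) must be set up first.
 * Passing NULL to evbuffer_enable_locking makes the buffer allocate and own
 * its lock. */
#if 0
static struct evbuffer *
make_shared_buffer(struct event_base *base)
{
	struct evbuffer *buf = evbuffer_new();
	if (!buf)
		return NULL;
	if (evbuffer_enable_locking(buf, NULL) < 0 ||
	    evbuffer_defer_callbacks(buf, base) < 0) {
		evbuffer_free(buf);
		return NULL;
	}
	return buf;
}
#endif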
void
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	     cbent != LIST_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
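/* Illustrative sketch (editorial note, not part of the library): a user
 * callback of the kind evbuffer_run_callbacks dispatches, registered with
 * the public evbuffer_add_cb() from event2/buffer.h. 'log_growth' is a
 * hypothetical name. */
#if 0
static void
log_growth(struct evbuffer *buffer, const struct evbuffer_cb_info *info,
    void *arg)
{
	/* orig_size is the length before this batch of changes. */
	fprintf(stderr, "%zu -> %zu bytes (+%zu, -%zu)\n",
	    info->orig_size,
	    info->orig_size + info->n_added - info->n_deleted,
	    info->n_added, info->n_deleted);
}
/* Registered as: evbuffer_add_cb(buf, log_growth, NULL); */
#endif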
void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
			EVBUFFER_UNLOCK(buffer);
		}
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}
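/* Illustrative sketch (editorial note, not part of the library): the basic
 * create/add/remove/free lifecycle around the functions above; evbuffer_add
 * and evbuffer_remove are defined later in this file. */
#if 0
static void
lifecycle_demo(void)
{
	char word[6];
	struct evbuffer *buf = evbuffer_new();
	if (!buf)
		return;
	evbuffer_add(buf, "hello world", 11);	/* length is now 11 */
	evbuffer_remove(buf, word, 5);		/* copies "hello" and drains it */
	word[5] = '\0';
	/* 6 bytes (" world") remain buffered. */
	evbuffer_free(buf);
}
#endif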
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

size_t
evbuffer_add_iovec(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vec)
{
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;
}

static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
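/* Illustrative sketch (editorial note, not part of the library): the
 * intended pairing of evbuffer_reserve_space and evbuffer_commit_space --
 * write into the reserved vectors directly, shrink each iov_len to what was
 * actually produced, then commit. Here the "payload" is just memset bytes. */
#if 0
static int
generate_into(struct evbuffer *buf)
{
	struct evbuffer_iovec v[2];
	size_t total = 0;
	int i, n;

	n = evbuffer_reserve_space(buf, 4096, v, 2);
	if (n < 0)
		return -1;
	for (i = 0; i < n && total < 4096; ++i) {
		size_t take = v[i].iov_len;
		if (take > 4096 - total)
			take = 4096 - total;
		memset(v[i].iov_base, 'x', take);	/* produce data in place */
		v[i].iov_len = take;	/* commit only what was written */
		total += take;
	}
	return evbuffer_commit_space(buf, v, i);
}
#endif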
static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over.
	 */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain **chp;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	chp = evbuffer_free_trailing_empty_chains(dst);
	*chp = src->first;

	if (src->last_with_datap == &src->first)
		dst->last_with_datap = chp;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
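/* Usage note (editorial, not upstream documentation): because
 * evbuffer_add_buffer moves chains instead of copying bytes, the source
 * buffer is left empty on success: */
#if 0
static void
move_all(struct evbuffer *dst, struct evbuffer *src)
{
	size_t n = evbuffer_get_length(src);
	if (evbuffer_add_buffer(dst, src) == 0) {
		/* dst grew by n bytes; src is now empty. */
	}
	(void)n;
}
#endif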
int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type cannot be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
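/* Illustrative sketch (editorial note, not part of the library): using
 * evbuffer_add_buffer_reference to share one payload with several output
 * buffers without copying; the source chains become immutable once
 * referenced, and chains that cannot be shared (file segments, sendfile,
 * multicast) make the call fail. */
#if 0
static int
broadcast(struct evbuffer *payload, struct evbuffer **outs, int n_outs)
{
	int i;
	for (i = 0; i < n_outs; ++i) {
		if (evbuffer_add_buffer_reference(outs[i], payload) < 0)
			return -1;
	}
	return 0;
}
#endif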
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		EVUTIL_ASSERT(remaining <= chain->off);
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}
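/* Usage note (editorial, not upstream documentation): evbuffer_remove is
 * copy-then-drain, while evbuffer_copyout (below) peeks without consuming.
 * A parser can combine them: */
#if 0
static void
peek_then_consume(struct evbuffer *buf)
{
	char hdr[4];
	/* Look at the first four bytes without draining them... */
	if (evbuffer_copyout(buf, hdr, 4) == 4 && hdr[0] == 'X')
		/* ...and consume them only once they are understood. */
		evbuffer_drain(buf, 4);
}
#endif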
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}

ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
			result = -1;
			goto done;
		}
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}


	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);

		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Moves data from the src buffer to the dst buffer, avoiding memcpy where
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread; /*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
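/* Illustrative sketch (editorial note, not part of the library): using
 * evbuffer_pullup to linearize a fixed-size header so that it can be parsed
 * with plain pointer arithmetic. Pulling up only the bytes that are needed
 * keeps the cost low; pulling up the whole buffer defeats the chain
 * representation. */
#if 0
static int
parse_frame_header(struct evbuffer *buf, ev_uint32_t *body_len_out)
{
	unsigned char *p;
	if (evbuffer_get_length(buf) < 8)
		return 0;	/* not enough data buffered yet */
	if ((p = evbuffer_pullup(buf, 8)) == NULL)
		return -1;
	*body_len_out = ((ev_uint32_t)p[4] << 24) | ((ev_uint32_t)p[5] << 16) |
	    ((ev_uint32_t)p[6] << 8) | (ev_uint32_t)p[7];
	return 1;
}
#endif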
/*
 * Reads a line terminated by any of '\r\n', '\n\r', '\r', or '\n'.
 * The returned buffer must be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
static inline size_t
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	size_t count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (! chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}


static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t off = it->internal_.pos_in_chain;

	if (chain == NULL)
		return -1;

	return (unsigned char)chain->buffer[chain->misalign + off];
}
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
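/* Illustrative sketch (editorial note, not part of the library): draining a
 * buffer line by line with the function above. EVBUFFER_EOL_CRLF accepts
 * both "\r\n" and bare "\n", matching the search logic in
 * evbuffer_search_eol; the caller owns each returned string. */
#if 0
static void
print_lines(struct evbuffer *buf)
{
	char *line;
	size_t len;
	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
		printf("got %zu bytes: %s\n", len, line);
		free(line);
	}
}
#endif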
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	if (*buf->last_with_datap == NULL) {
		chain = buf->last;
	} else {
		chain = *buf->last_with_datap;
	}

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
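/* Illustrative sketch (editorial note, not part of the library): building a
 * length-prefixed record by appending the body first and prepending the
 * header afterwards; the misalign bookkeeping in evbuffer_prepend (below)
 * makes the prepend cheap. 'rec' is assumed to be a freshly created, empty
 * evbuffer. */
#if 0
static int
make_record(struct evbuffer *rec, const void *body, ev_uint32_t len)
{
	unsigned char hdr[4] = {
		(unsigned char)(len >> 24), (unsigned char)(len >> 16),
		(unsigned char)(len >> 8), (unsigned char)len
	};
	if (evbuffer_add(rec, body, len) < 0)
		return -1;
	return evbuffer_prepend(rec, hdr, 4);
}
#endif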
int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_start) {
		goto done;
	}
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);

		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data. */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	EVUTIL_ASSERT(datlen <= tmp->buffer_len);
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}

#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND ||
	    datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty). */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains). */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* can't overflow, since only mutable chains have
			 * huge misaligns. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		EVUTIL_ASSERT(datlen >= avail);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}
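/* Usage note (editorial, not upstream documentation): preallocating with
 * evbuffer_expand before a burst of small appends avoids growing the chain
 * list mid-loop. 'est' is a hypothetical caller-supplied size estimate. */
#if 0
static int
append_many(struct evbuffer *buf, const char **items, int n, size_t est)
{
	int i;
	if (evbuffer_expand(buf, est) < 0)	/* one allocation up front */
		return -1;
	for (i = 0; i < n; ++i)
		if (evbuffer_add(buf, items[i], strlen(items[i])) < 0)
			return -1;
	return 0;
}
#endif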
2203 2204 @param buf The buffer to read into 2205 @param howmuch How much we want to read. 2206 @param vecs An array of two or more iovecs or WSABUFs. 2207 @param n_vecs_avail The length of vecs 2208 @param chainp A pointer to a variable to hold the first chain we're 2209 reading into. 2210 @param exact Boolean: if true, we do not provide more than 'howmuch' 2211 space in the vectors, even if more space is available. 2212 @return The number of buffers we're using. 2213 */ 2214 int 2215 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, 2216 struct evbuffer_iovec *vecs, int n_vecs_avail, 2217 struct evbuffer_chain ***chainp, int exact) 2218 { 2219 struct evbuffer_chain *chain; 2220 struct evbuffer_chain **firstchainp; 2221 size_t so_far; 2222 int i; 2223 ASSERT_EVBUFFER_LOCKED(buf); 2224 2225 if (howmuch < 0) 2226 return -1; 2227 2228 so_far = 0; 2229 /* Let firstchain be the first chain with any space on it */ 2230 firstchainp = buf->last_with_datap; 2231 EVUTIL_ASSERT(*firstchainp); 2232 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 2233 firstchainp = &(*firstchainp)->next; 2234 } 2235 2236 chain = *firstchainp; 2237 EVUTIL_ASSERT(chain); 2238 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 2239 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 2240 if (avail > (howmuch - so_far) && exact) 2241 avail = howmuch - so_far; 2242 vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); 2243 vecs[i].iov_len = avail; 2244 so_far += avail; 2245 chain = chain->next; 2246 } 2247 2248 *chainp = firstchainp; 2249 return i; 2250 } 2251 2252 static int 2253 get_n_bytes_readable_on_socket(evutil_socket_t fd) 2254 { 2255 #if defined(FIONREAD) && defined(_WIN32) 2256 unsigned long lng = EVBUFFER_MAX_READ; 2257 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 2258 return -1; 2259 /* Can overflow, but mostly harmlessly. XXXX */ 2260 return (int)lng; 2261 #elif defined(FIONREAD) 2262 int n = EVBUFFER_MAX_READ; 2263 if (ioctl(fd, FIONREAD, &n) < 0) 2264 return -1; 2265 return n; 2266 #else 2267 return EVBUFFER_MAX_READ; 2268 #endif 2269 } 2270 2271 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 2272 * as howmuch? */ 2273 int 2274 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2275 { 2276 struct evbuffer_chain **chainp; 2277 int n; 2278 int result; 2279 2280 #ifdef USE_IOVEC_IMPL 2281 int nvecs, i, remaining; 2282 #else 2283 struct evbuffer_chain *chain; 2284 unsigned char *p; 2285 #endif 2286 2287 EVBUFFER_LOCK(buf); 2288 2289 if (buf->freeze_end) { 2290 result = -1; 2291 goto done; 2292 } 2293 2294 n = get_n_bytes_readable_on_socket(fd); 2295 if (n <= 0 || n > EVBUFFER_MAX_READ) 2296 n = EVBUFFER_MAX_READ; 2297 if (howmuch < 0 || howmuch > n) 2298 howmuch = n; 2299 2300 #ifdef USE_IOVEC_IMPL 2301 /* Since we can use iovecs, we're willing to use the last 2302 * NUM_READ_IOVEC chains. */ 2303 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2304 result = -1; 2305 goto done; 2306 } else { 2307 IOV_TYPE vecs[NUM_READ_IOVEC]; 2308 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2309 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2310 NUM_READ_IOVEC, &chainp, 1); 2311 #else 2312 /* We aren't using the native struct iovec. Therefore, 2313 we are on win32. 
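Each evbuffer_iovec is translated field-for-field into a WSABUF below; WSABUF_FROM_EVBUFFER_IOV presumably just copies iov_base into buf and iov_len into len, since both structures describe a (pointer, length) pair.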
*/ 2314 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2315 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2316 &chainp, 1); 2317 2318 for (i=0; i < nvecs; ++i) 2319 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2320 #endif 2321 2322 #ifdef _WIN32 2323 { 2324 DWORD bytesRead; 2325 DWORD flags=0; 2326 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2327 /* The read failed. It might be a close, 2328 * or it might be an error. */ 2329 if (WSAGetLastError() == WSAECONNABORTED) 2330 n = 0; 2331 else 2332 n = -1; 2333 } else 2334 n = bytesRead; 2335 } 2336 #else 2337 n = readv(fd, vecs, nvecs); 2338 #endif 2339 } 2340 2341 #else /*!USE_IOVEC_IMPL*/ 2342 /* If we don't have FIONREAD, we might waste some space here */ 2343 /* XXX we _will_ waste some space here if there is any space left 2344 * over on buf->last. */ 2345 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { 2346 result = -1; 2347 goto done; 2348 } 2349 2350 /* We can append new data at this point */ 2351 p = chain->buffer + chain->misalign + chain->off; 2352 2353 #ifndef _WIN32 2354 n = read(fd, p, howmuch); 2355 #else 2356 n = recv(fd, p, howmuch, 0); 2357 #endif 2358 #endif /* USE_IOVEC_IMPL */ 2359 2360 if (n == -1) { 2361 result = -1; 2362 goto done; 2363 } 2364 if (n == 0) { 2365 result = 0; 2366 goto done; 2367 } 2368 2369 #ifdef USE_IOVEC_IMPL 2370 remaining = n; 2371 for (i=0; i < nvecs; ++i) { 2372 /* can't overflow, since only mutable chains have 2373 * huge misaligns. */ 2374 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); 2375 /* XXXX This is a kludge that can waste space in perverse 2376 * situations. */ 2377 if (space > EVBUFFER_CHAIN_MAX) 2378 space = EVBUFFER_CHAIN_MAX; 2379 if ((ev_ssize_t)space < remaining) { 2380 (*chainp)->off += space; 2381 remaining -= (int)space; 2382 } else { 2383 (*chainp)->off += remaining; 2384 buf->last_with_datap = chainp; 2385 break; 2386 } 2387 chainp = &(*chainp)->next; 2388 } 2389 #else 2390 chain->off += n; 2391 advance_last_with_data(buf); 2392 #endif 2393 buf->total_len += n; 2394 buf->n_add_for_cb += n; 2395 2396 /* Tell someone about changes in this buffer */ 2397 evbuffer_invoke_callbacks_(buf); 2398 result = n; 2399 done: 2400 EVBUFFER_UNLOCK(buf); 2401 return result; 2402 } 2403 2404 #ifdef USE_IOVEC_IMPL 2405 static inline int 2406 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, 2407 ev_ssize_t howmuch) 2408 { 2409 IOV_TYPE iov[NUM_WRITE_IOVEC]; 2410 struct evbuffer_chain *chain = buffer->first; 2411 int n, i = 0; 2412 2413 if (howmuch < 0) 2414 return -1; 2415 2416 ASSERT_EVBUFFER_LOCKED(buffer); 2417 /* XXX make this top out at some maximal data length? if the 2418 * buffer has (say) 1MB in it, split over 128 chains, there's 2419 * no way it all gets written in one go. */ 2420 while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { 2421 #ifdef USE_SENDFILE 2422 /* we cannot write the file info via writev */ 2423 if (chain->flags & EVBUFFER_SENDFILE) 2424 break; 2425 #endif 2426 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); 2427 if ((size_t)howmuch >= chain->off) { 2428 /* XXXcould be problematic when windows supports mmap*/ 2429 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; 2430 howmuch -= chain->off; 2431 } else { 2432 /* XXXcould be problematic when windows supports mmap*/ 2433 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; 2434 break; 2435 } 2436 chain = chain->next; 2437 } 2438 if (! 
i) 2439 return 0; 2440 2441 #ifdef _WIN32 2442 { 2443 DWORD bytesSent; 2444 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) 2445 n = -1; 2446 else 2447 n = bytesSent; 2448 } 2449 #else 2450 n = writev(fd, iov, i); 2451 #endif 2452 return (n); 2453 } 2454 #endif 2455 2456 #ifdef USE_SENDFILE 2457 static inline int 2458 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, 2459 ev_ssize_t howmuch) 2460 { 2461 struct evbuffer_chain *chain = buffer->first; 2462 struct evbuffer_chain_file_segment *info = 2463 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, 2464 chain); 2465 const int source_fd = info->segment->fd; 2466 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) 2467 int res; 2468 ev_off_t len = chain->off; 2469 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) 2470 ev_ssize_t res; 2471 off_t offset = chain->misalign; 2472 #endif 2473 2474 ASSERT_EVBUFFER_LOCKED(buffer); 2475 2476 #if defined(SENDFILE_IS_MACOSX) 2477 res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); 2478 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2479 return (-1); 2480 2481 return (len); 2482 #elif defined(SENDFILE_IS_FREEBSD) 2483 res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); 2484 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2485 return (-1); 2486 2487 return (len); 2488 #elif defined(SENDFILE_IS_LINUX) 2489 /* TODO(niels): implement splice */ 2490 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2491 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2492 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ 2493 return (0); 2494 } 2495 return (res); 2496 #elif defined(SENDFILE_IS_SOLARIS) 2497 { 2498 const off_t offset_orig = offset; 2499 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2500 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2501 if (offset - offset_orig) 2502 return offset - offset_orig; 2503 /* if this is EAGAIN or EINTR and no bytes were 2504 * written, return 0 */ 2505 return (0); 2506 } 2507 return (res); 2508 } 2509 #endif 2510 } 2511 #endif 2512 2513 int 2514 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, 2515 ev_ssize_t howmuch) 2516 { 2517 int n = -1; 2518 2519 EVBUFFER_LOCK(buffer); 2520 2521 if (buffer->freeze_start) { 2522 goto done; 2523 } 2524 2525 if (howmuch < 0 || (size_t)howmuch > buffer->total_len) 2526 howmuch = buffer->total_len; 2527 2528 if (howmuch > 0) { 2529 #ifdef USE_SENDFILE 2530 struct evbuffer_chain *chain = buffer->first; 2531 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) 2532 n = evbuffer_write_sendfile(buffer, fd, howmuch); 2533 else { 2534 #endif 2535 #ifdef USE_IOVEC_IMPL 2536 n = evbuffer_write_iovec(buffer, fd, howmuch); 2537 #elif defined(_WIN32) 2538 /* XXX(nickm) Don't disable this code until we know if 2539 * the WSARecv code above works. 
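 * (The fallback below first linearizes the window with evbuffer_pullup(),
 * which may copy up to 'howmuch' bytes into a single chain; the vectored
 * writev()/WSASend() paths above avoid that copy.)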
*/ 2540 void *p = evbuffer_pullup(buffer, howmuch); 2541 EVUTIL_ASSERT(p || !howmuch); 2542 n = send(fd, p, howmuch, 0); 2543 #else 2544 void *p = evbuffer_pullup(buffer, howmuch); 2545 EVUTIL_ASSERT(p || !howmuch); 2546 n = write(fd, p, howmuch); 2547 #endif 2548 #ifdef USE_SENDFILE 2549 } 2550 #endif 2551 } 2552 2553 if (n > 0) 2554 evbuffer_drain(buffer, n); 2555 2556 done: 2557 EVBUFFER_UNLOCK(buffer); 2558 return (n); 2559 } 2560 2561 int 2562 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) 2563 { 2564 return evbuffer_write_atmost(buffer, fd, -1); 2565 } 2566 2567 unsigned char * 2568 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len) 2569 { 2570 unsigned char *search; 2571 struct evbuffer_ptr ptr; 2572 2573 EVBUFFER_LOCK(buffer); 2574 2575 ptr = evbuffer_search(buffer, (const char *)what, len, NULL); 2576 if (ptr.pos < 0) { 2577 search = NULL; 2578 } else { 2579 search = evbuffer_pullup(buffer, ptr.pos + len); 2580 if (search) 2581 search += ptr.pos; 2582 } 2583 EVBUFFER_UNLOCK(buffer); 2584 return search; 2585 } 2586 2587 /* Subtract <b>howfar</b> from the position of <b>pos</b> within 2588 * <b>buf</b>. Returns 0 on success, -1 on failure. 2589 * 2590 * This isn't exposed yet, because of potential inefficiency issues. 2591 * Maybe it should be. */ 2592 static int 2593 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, 2594 size_t howfar) 2595 { 2596 if (pos->pos < 0) 2597 return -1; 2598 if (howfar > (size_t)pos->pos) 2599 return -1; 2600 if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { 2601 pos->internal_.pos_in_chain -= howfar; 2602 pos->pos -= howfar; 2603 return 0; 2604 } else { 2605 const size_t newpos = pos->pos - howfar; 2606 /* Here's the inefficient part: it walks over the 2607 * chains until we hit newpos. */ 2608 return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET); 2609 } 2610 } 2611 2612 int 2613 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos, 2614 size_t position, enum evbuffer_ptr_how how) 2615 { 2616 size_t left = position; 2617 struct evbuffer_chain *chain = NULL; 2618 int result = 0; 2619 2620 EVBUFFER_LOCK(buf); 2621 2622 switch (how) { 2623 case EVBUFFER_PTR_SET: 2624 chain = buf->first; 2625 pos->pos = position; 2626 position = 0; 2627 break; 2628 case EVBUFFER_PTR_ADD: 2629 /* this avoids iterating over all previous chains if 2630 we just want to advance the position */ 2631 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { 2632 EVBUFFER_UNLOCK(buf); 2633 return -1; 2634 } 2635 chain = pos->internal_.chain; 2636 pos->pos += position; 2637 position = pos->internal_.pos_in_chain; 2638 break; 2639 } 2640 2641 EVUTIL_ASSERT(EV_SIZE_MAX - left >= position); 2642 while (chain && position + left >= chain->off) { 2643 left -= chain->off - position; 2644 chain = chain->next; 2645 position = 0; 2646 } 2647 if (chain) { 2648 pos->internal_.chain = chain; 2649 pos->internal_.pos_in_chain = position + left; 2650 } else if (left == 0) { 2651 /* The first byte in the (nonexistent) chain after the last chain */ 2652 pos->internal_.chain = NULL; 2653 pos->internal_.pos_in_chain = 0; 2654 } else { 2655 PTR_NOT_FOUND(pos); 2656 result = -1; 2657 } 2658 2659 EVBUFFER_UNLOCK(buf); 2660 2661 return result; 2662 } 2663 2664 /** 2665 Compare the bytes in buf at position pos to the len bytes in mem. Return 2666 less than 0, 0, or greater than 0 as memcmp does.
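   For example, if the buffer contains "abcdef" and pos refers to offset 2,
   comparing against "cde" with len 3 returns 0, and comparing against
   "cdx" returns a negative value, just as memcmp("cde", "cdx", 3) would.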
2667 */ 2668 static int 2669 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, 2670 const char *mem, size_t len) 2671 { 2672 struct evbuffer_chain *chain; 2673 size_t position; 2674 int r; 2675 2676 ASSERT_EVBUFFER_LOCKED(buf); 2677 2678 if (pos->pos < 0 || 2679 EV_SIZE_MAX - len < (size_t)pos->pos || 2680 pos->pos + len > buf->total_len) 2681 return -1; 2682 2683 chain = pos->internal_.chain; 2684 position = pos->internal_.pos_in_chain; 2685 while (len && chain) { 2686 size_t n_comparable; 2687 if (len + position > chain->off) 2688 n_comparable = chain->off - position; 2689 else 2690 n_comparable = len; 2691 r = memcmp(chain->buffer + chain->misalign + position, mem, 2692 n_comparable); 2693 if (r) 2694 return r; 2695 mem += n_comparable; 2696 len -= n_comparable; 2697 position = 0; 2698 chain = chain->next; 2699 } 2700 2701 return 0; 2702 } 2703 2704 struct evbuffer_ptr 2705 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) 2706 { 2707 return evbuffer_search_range(buffer, what, len, start, NULL); 2708 } 2709 2710 struct evbuffer_ptr 2711 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) 2712 { 2713 struct evbuffer_ptr pos; 2714 struct evbuffer_chain *chain, *last_chain = NULL; 2715 const unsigned char *p; 2716 char first; 2717 2718 EVBUFFER_LOCK(buffer); 2719 2720 if (start) { 2721 memcpy(&pos, start, sizeof(pos)); 2722 chain = pos.internal_.chain; 2723 } else { 2724 pos.pos = 0; 2725 chain = pos.internal_.chain = buffer->first; 2726 pos.internal_.pos_in_chain = 0; 2727 } 2728 2729 if (end) 2730 last_chain = end->internal_.chain; 2731 2732 if (!len || len > EV_SSIZE_MAX) 2733 goto done; 2734 2735 first = what[0]; 2736 2737 while (chain) { 2738 const unsigned char *start_at = 2739 chain->buffer + chain->misalign + 2740 pos.internal_.pos_in_chain; 2741 p = memchr(start_at, first, 2742 chain->off - pos.internal_.pos_in_chain); 2743 if (p) { 2744 pos.pos += p - start_at; 2745 pos.internal_.pos_in_chain += p - start_at; 2746 if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { 2747 if (end && pos.pos + (ev_ssize_t)len > end->pos) 2748 goto not_found; 2749 else 2750 goto done; 2751 } 2752 ++pos.pos; 2753 ++pos.internal_.pos_in_chain; 2754 if (pos.internal_.pos_in_chain == chain->off) { 2755 chain = pos.internal_.chain = chain->next; 2756 pos.internal_.pos_in_chain = 0; 2757 } 2758 } else { 2759 if (chain == last_chain) 2760 goto not_found; 2761 pos.pos += chain->off - pos.internal_.pos_in_chain; 2762 chain = pos.internal_.chain = chain->next; 2763 pos.internal_.pos_in_chain = 0; 2764 } 2765 } 2766 2767 not_found: 2768 PTR_NOT_FOUND(&pos); 2769 done: 2770 EVBUFFER_UNLOCK(buffer); 2771 return pos; 2772 } 2773 2774 int 2775 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, 2776 struct evbuffer_ptr *start_at, 2777 struct evbuffer_iovec *vec, int n_vec) 2778 { 2779 struct evbuffer_chain *chain; 2780 int idx = 0; 2781 ev_ssize_t len_so_far = 0; 2782 2783 /* Avoid locking in trivial edge cases */ 2784 if (start_at && start_at->internal_.chain == NULL) 2785 return 0; 2786 2787 EVBUFFER_LOCK(buffer); 2788 2789 if (start_at) { 2790 chain = start_at->internal_.chain; 2791 len_so_far = chain->off 2792 - start_at->internal_.pos_in_chain; 2793 idx = 1; 2794 if (n_vec > 0) { 2795 vec[0].iov_base = (void *)(chain->buffer + chain->misalign 2796 + start_at->internal_.pos_in_chain); 2797 vec[0].iov_len = len_so_far; 2798 } 2799 
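/* The first extent has been recorded; the loop below picks up the chains that follow. */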
chain = chain->next; 2800 } else { 2801 chain = buffer->first; 2802 } 2803 2804 if (n_vec == 0 && len < 0) { 2805 /* If no vectors are provided and they asked for "everything", 2806 * pretend they asked for the actual available amount. */ 2807 len = buffer->total_len; 2808 if (start_at) { 2809 len -= start_at->pos; 2810 } 2811 } 2812 2813 while (chain) { 2814 if (len >= 0 && len_so_far >= len) 2815 break; 2816 if (idx<n_vec) { 2817 vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); 2818 vec[idx].iov_len = chain->off; 2819 } else if (len<0) { 2820 break; 2821 } 2822 ++idx; 2823 len_so_far += chain->off; 2824 chain = chain->next; 2825 } 2826 2827 EVBUFFER_UNLOCK(buffer); 2828 2829 return idx; 2830 } 2831 2832 2833 int 2834 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) 2835 { 2836 char *buffer; 2837 size_t space; 2838 int sz, result = -1; 2839 va_list aq; 2840 struct evbuffer_chain *chain; 2841 2842 2843 EVBUFFER_LOCK(buf); 2844 2845 if (buf->freeze_end) { 2846 goto done; 2847 } 2848 2849 /* make sure that at least some space is available */ 2850 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) 2851 goto done; 2852 2853 for (;;) { 2854 #if 0 2855 size_t used = chain->misalign + chain->off; 2856 buffer = (char *)chain->buffer + chain->misalign + chain->off; 2857 EVUTIL_ASSERT(chain->buffer_len >= used); 2858 space = chain->buffer_len - used; 2859 #endif 2860 buffer = (char*) CHAIN_SPACE_PTR(chain); 2861 space = (size_t) CHAIN_SPACE_LEN(chain); 2862 2863 #ifndef va_copy 2864 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) 2865 #endif 2866 va_copy(aq, ap); 2867 2868 sz = evutil_vsnprintf(buffer, space, fmt, aq); 2869 2870 va_end(aq); 2871 2872 if (sz < 0) 2873 goto done; 2874 if (INT_MAX >= EVBUFFER_CHAIN_MAX && 2875 (size_t)sz >= EVBUFFER_CHAIN_MAX) 2876 goto done; 2877 if ((size_t)sz < space) { 2878 chain->off += sz; 2879 buf->total_len += sz; 2880 buf->n_add_for_cb += sz; 2881 2882 advance_last_with_data(buf); 2883 evbuffer_invoke_callbacks_(buf); 2884 result = sz; 2885 goto done; 2886 } 2887 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) 2888 goto done; 2889 } 2890 /* NOTREACHED */ 2891 2892 done: 2893 EVBUFFER_UNLOCK(buf); 2894 return result; 2895 } 2896 2897 int 2898 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) 
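/* Varargs front end to evbuffer_add_vprintf() above; for instance, evbuffer_add_printf(buf, "%d bytes pending", 42) appends the formatted text and returns the number of bytes added, or -1 on error (the call shown is illustrative). */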
2899 { 2900 int res = -1; 2901 va_list ap; 2902 2903 va_start(ap, fmt); 2904 res = evbuffer_add_vprintf(buf, fmt, ap); 2905 va_end(ap); 2906 2907 return (res); 2908 } 2909 2910 int 2911 evbuffer_add_reference(struct evbuffer *outbuf, 2912 const void *data, size_t datlen, 2913 evbuffer_ref_cleanup_cb cleanupfn, void *extra) 2914 { 2915 struct evbuffer_chain *chain; 2916 struct evbuffer_chain_reference *info; 2917 int result = -1; 2918 2919 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); 2920 if (!chain) 2921 return (-1); 2922 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; 2923 chain->buffer = (unsigned char *)data; 2924 chain->buffer_len = datlen; 2925 chain->off = datlen; 2926 2927 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); 2928 info->cleanupfn = cleanupfn; 2929 info->extra = extra; 2930 2931 EVBUFFER_LOCK(outbuf); 2932 if (outbuf->freeze_end) { 2933 /* don't call chain_free; we do not want to actually invoke 2934 * the cleanup function */ 2935 mm_free(chain); 2936 goto done; 2937 } 2938 evbuffer_chain_insert(outbuf, chain); 2939 outbuf->n_add_for_cb += datlen; 2940 2941 evbuffer_invoke_callbacks_(outbuf); 2942 2943 result = 0; 2944 done: 2945 EVBUFFER_UNLOCK(outbuf); 2946 2947 return result; 2948 } 2949 2950 /* TODO(niels): we may want to automagically convert to mmap, in 2951 * case evbuffer_remove() or evbuffer_pullup() are being used. 2952 */ 2953 struct evbuffer_file_segment * 2954 evbuffer_file_segment_new( 2955 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2956 { 2957 struct evbuffer_file_segment *seg = 2958 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2959 if (!seg) 2960 return NULL; 2961 seg->refcnt = 1; 2962 seg->fd = fd; 2963 seg->flags = flags; 2964 seg->file_offset = offset; 2965 seg->cleanup_cb = NULL; 2966 seg->cleanup_cb_arg = NULL; 2967 #ifdef _WIN32 2968 #ifndef lseek 2969 #define lseek _lseeki64 2970 #endif 2971 #ifndef fstat 2972 #define fstat _fstat 2973 #endif 2974 #ifndef stat 2975 #define stat _stat 2976 #endif 2977 #endif 2978 if (length == -1) { 2979 struct stat st; 2980 if (fstat(fd, &st) < 0) 2981 goto err; 2982 length = st.st_size; 2983 } 2984 seg->length = length; 2985 2986 if (offset < 0 || length < 0 || 2987 ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || 2988 (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) 2989 goto err; 2990 2991 #if defined(USE_SENDFILE) 2992 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 2993 seg->can_sendfile = 1; 2994 goto done; 2995 } 2996 #endif 2997 2998 if (evbuffer_file_segment_materialize(seg)<0) 2999 goto err; 3000 3001 #if defined(USE_SENDFILE) 3002 done: 3003 #endif 3004 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 3005 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 3006 } 3007 return seg; 3008 err: 3009 mm_free(seg); 3010 return NULL; 3011 } 3012 3013 #ifdef EVENT__HAVE_MMAP 3014 static long 3015 get_page_size(void) 3016 { 3017 #ifdef SC_PAGE_SIZE 3018 return sysconf(SC_PAGE_SIZE); 3019 #elif defined(_SC_PAGE_SIZE) 3020 return sysconf(_SC_PAGE_SIZE); 3021 #else 3022 return 1; 3023 #endif 3024 } 3025 #endif 3026 3027 /* Make sure seg->contents is usable: map the file into memory with mmap or CreateFileMapping where possible, or read its contents into an allocated buffer otherwise. Returns 0 on success, -1 on failure. */ 3028 /* Requires lock */ 3029 static int 3030 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) 3031 { 3032 const unsigned flags = seg->flags; 3033 const int fd = seg->fd; 3034 const ev_off_t length = seg->length; 3035 const ev_off_t offset = seg->file_offset; 3036 3037 if (seg->contents) 3038 return 0; /* already materialized */ 3039 3040 #if defined(EVENT__HAVE_MMAP) 3041 if (!(flags &
EVBUF_FS_DISABLE_MMAP)) { 3042 off_t offset_rounded = 0, offset_leftover = 0; 3043 void *mapped; 3044 if (offset) { 3045 /* mmap implementations don't generally like us 3046 * to have an offset that isn't a round multiple of the page size. */ 3047 long page_size = get_page_size(); 3048 if (page_size == -1) 3049 goto err; 3050 offset_leftover = offset % page_size; 3051 offset_rounded = offset - offset_leftover; 3052 } 3053 mapped = mmap(NULL, length + offset_leftover, 3054 PROT_READ, 3055 #ifdef MAP_NOCACHE 3056 MAP_NOCACHE | /* ??? */ 3057 #endif 3058 #ifdef MAP_FILE 3059 MAP_FILE | 3060 #endif 3061 MAP_PRIVATE, 3062 fd, offset_rounded); 3063 if (mapped == MAP_FAILED) { 3064 event_warn("%s: mmap(%d, %d, %zu) failed", 3065 __func__, fd, 0, (size_t)(offset + length)); 3066 } else { 3067 seg->mapping = mapped; 3068 seg->contents = (char*)mapped+offset_leftover; 3069 seg->mmap_offset = 0; 3070 seg->is_mapping = 1; 3071 goto done; 3072 } 3073 } 3074 #endif 3075 #ifdef _WIN32 3076 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 3077 intptr_t h = _get_osfhandle(fd); 3078 HANDLE m; 3079 ev_uint64_t total_size = length+offset; 3080 if ((HANDLE)h == INVALID_HANDLE_VALUE) 3081 goto err; 3082 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, 3083 (total_size >> 32), total_size & 0xfffffffful, 3084 NULL); 3085 if (m != NULL) { /* CreateFileMapping returns NULL, not INVALID_HANDLE_VALUE, on failure. Does h leak? */ 3086 seg->mapping_handle = m; 3087 seg->mmap_offset = offset; 3088 seg->is_mapping = 1; 3089 goto done; 3090 } 3091 } 3092 #endif 3093 { 3094 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; 3095 ev_off_t read_so_far = 0; 3096 char *mem; 3097 int e; 3098 ev_ssize_t n = 0; 3099 if (!(mem = mm_malloc(length))) 3100 goto err; 3101 if (start_pos < 0) { 3102 mm_free(mem); 3103 goto err; 3104 } 3105 if (lseek(fd, offset, SEEK_SET) < 0) { 3106 mm_free(mem); 3107 goto err; 3108 } 3109 while (read_so_far < length) { 3110 n = read(fd, mem+read_so_far, length-read_so_far); 3111 if (n <= 0) 3112 break; 3113 read_so_far += n; 3114 } 3115 3116 e = errno; 3117 pos = lseek(fd, start_pos, SEEK_SET); 3118 if (n < 0 || (n == 0 && length > read_so_far)) { 3119 mm_free(mem); 3120 errno = e; 3121 goto err; 3122 } else if (pos < 0) { 3123 mm_free(mem); 3124 goto err; 3125 } 3126 3127 seg->contents = mem; 3128 } 3129 3130 done: 3131 return 0; 3132 err: 3133 return -1; 3134 } 3135 3136 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, 3137 evbuffer_file_segment_cleanup_cb cb, void* arg) 3138 { 3139 EVUTIL_ASSERT(seg->refcnt > 0); 3140 seg->cleanup_cb = cb; 3141 seg->cleanup_cb_arg = arg; 3142 } 3143 3144 void 3145 evbuffer_file_segment_free(struct evbuffer_file_segment *seg) 3146 { 3147 int refcnt; 3148 EVLOCK_LOCK(seg->lock, 0); 3149 refcnt = --seg->refcnt; 3150 EVLOCK_UNLOCK(seg->lock, 0); 3151 if (refcnt > 0) 3152 return; 3153 EVUTIL_ASSERT(refcnt == 0); 3154 3155 if (seg->is_mapping) { 3156 #ifdef _WIN32 3157 CloseHandle(seg->mapping_handle); 3158 #elif defined (EVENT__HAVE_MMAP) 3159 off_t offset_leftover; 3160 offset_leftover = seg->file_offset % get_page_size(); 3161 if (munmap(seg->mapping, seg->length + offset_leftover) == -1) 3162 event_warn("%s: munmap failed", __func__); 3163 #endif 3164 } else if (seg->contents) { 3165 mm_free(seg->contents); 3166 } 3167 3168 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { 3169 close(seg->fd); 3170 } 3171 3172 if (seg->cleanup_cb) { 3173 (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg, 3174 seg->flags, seg->cleanup_cb_arg); 3175 seg->cleanup_cb = NULL; 3176 seg->cleanup_cb_arg = NULL; 3177 } 3178 3179
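/* No references remain: release the lock object and the segment itself. */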
EVTHREAD_FREE_LOCK(seg->lock, 0); 3180 mm_free(seg); 3181 } 3182 3183 int 3184 evbuffer_add_file_segment(struct evbuffer *buf, 3185 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3186 { 3187 struct evbuffer_chain *chain; 3188 struct evbuffer_chain_file_segment *extra; 3189 int can_use_sendfile = 0; 3190 3191 EVBUFFER_LOCK(buf); 3192 EVLOCK_LOCK(seg->lock, 0); 3193 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3194 can_use_sendfile = 1; 3195 } else { 3196 if (!seg->contents) { 3197 if (evbuffer_file_segment_materialize(seg)<0) { 3198 EVLOCK_UNLOCK(seg->lock, 0); 3199 EVBUFFER_UNLOCK(buf); 3200 return -1; 3201 } 3202 } 3203 } 3204 ++seg->refcnt; 3205 EVLOCK_UNLOCK(seg->lock, 0); 3206 3207 if (buf->freeze_end) 3208 goto err; 3209 3210 if (length < 0) { 3211 if (offset > seg->length) 3212 goto err; 3213 length = seg->length - offset; 3214 } 3215 3216 /* Can we actually add this? */ 3217 if (offset+length > seg->length) 3218 goto err; 3219 3220 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3221 if (!chain) 3222 goto err; 3223 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3224 3225 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3226 if (can_use_sendfile && seg->can_sendfile) { 3227 chain->flags |= EVBUFFER_SENDFILE; 3228 chain->misalign = seg->file_offset + offset; 3229 chain->off = length; 3230 chain->buffer_len = chain->misalign + length; 3231 } else if (seg->is_mapping) { 3232 #ifdef _WIN32 3233 ev_uint64_t total_offset = seg->mmap_offset+offset; 3234 ev_uint64_t offset_rounded=0, offset_remaining=0; 3235 LPVOID data; 3236 if (total_offset) { 3237 SYSTEM_INFO si; 3238 memset(&si, 0, sizeof(si)); /* cargo cult */ 3239 GetSystemInfo(&si); 3240 offset_remaining = total_offset % si.dwAllocationGranularity; 3241 offset_rounded = total_offset - offset_remaining; 3242 } 3243 data = MapViewOfFile( 3244 seg->mapping_handle, 3245 FILE_MAP_READ, 3246 offset_rounded >> 32, 3247 offset_rounded & 0xfffffffful, 3248 length + offset_remaining); 3249 if (data == NULL) { 3250 mm_free(chain); 3251 goto err; 3252 } 3253 chain->buffer = (unsigned char*) data; 3254 chain->buffer_len = length+offset_remaining; 3255 chain->misalign = offset_remaining; 3256 chain->off = length; 3257 #else 3258 chain->buffer = (unsigned char*)(seg->contents + offset); 3259 chain->buffer_len = length; 3260 chain->off = length; 3261 #endif 3262 } else { 3263 chain->buffer = (unsigned char*)(seg->contents + offset); 3264 chain->buffer_len = length; 3265 chain->off = length; 3266 } 3267 3268 extra->segment = seg; 3269 buf->n_add_for_cb += length; 3270 evbuffer_chain_insert(buf, chain); 3271 3272 evbuffer_invoke_callbacks_(buf); 3273 3274 EVBUFFER_UNLOCK(buf); 3275 3276 return 0; 3277 err: 3278 EVBUFFER_UNLOCK(buf); 3279 evbuffer_file_segment_free(seg); /* Lowers the refcount */ 3280 return -1; 3281 } 3282 3283 int 3284 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3285 { 3286 struct evbuffer_file_segment *seg; 3287 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3288 int r; 3289 3290 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3291 if (!seg) 3292 return -1; 3293 r = evbuffer_add_file_segment(buf, seg, 0, length); 3294 if (r == 0) 3295 evbuffer_file_segment_free(seg); 3296 return r; 3297 } 3298 3299 void 3300 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3301 { 3302 EVBUFFER_LOCK(buffer); 3303 3304 if (!LIST_EMPTY(&buffer->callbacks)) 3305 evbuffer_remove_all_callbacks(buffer); 3306 3307 
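/* Install the single old-style callback; with a NULL cb the buffer is simply left with no callbacks at all. */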
if (cb) { 3308 struct evbuffer_cb_entry *ent = 3309 evbuffer_add_cb(buffer, NULL, cbarg); 3310 if (ent) { /* evbuffer_add_cb() returns NULL on allocation failure */ ent->cb.cb_obsolete = cb; 3311 ent->flags |= EVBUFFER_CB_OBSOLETE; } 3312 } 3313 EVBUFFER_UNLOCK(buffer); 3314 } 3315 3316 struct evbuffer_cb_entry * 3317 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3318 { 3319 struct evbuffer_cb_entry *e; 3320 if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 3321 return NULL; 3322 EVBUFFER_LOCK(buffer); 3323 e->cb.cb_func = cb; 3324 e->cbarg = cbarg; 3325 e->flags = EVBUFFER_CB_ENABLED; 3326 LIST_INSERT_HEAD(&buffer->callbacks, e, next); 3327 EVBUFFER_UNLOCK(buffer); 3328 return e; 3329 } 3330 3331 int 3332 evbuffer_remove_cb_entry(struct evbuffer *buffer, 3333 struct evbuffer_cb_entry *ent) 3334 { 3335 EVBUFFER_LOCK(buffer); 3336 LIST_REMOVE(ent, next); 3337 EVBUFFER_UNLOCK(buffer); 3338 mm_free(ent); 3339 return 0; 3340 } 3341 3342 int 3343 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3344 { 3345 struct evbuffer_cb_entry *cbent; 3346 int result = -1; 3347 EVBUFFER_LOCK(buffer); 3348 LIST_FOREACH(cbent, &buffer->callbacks, next) { 3349 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 3350 result = evbuffer_remove_cb_entry(buffer, cbent); 3351 goto done; 3352 } 3353 } 3354 done: 3355 EVBUFFER_UNLOCK(buffer); 3356 return result; 3357 } 3358 3359 int 3360 evbuffer_cb_set_flags(struct evbuffer *buffer, 3361 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3362 { 3363 /* the user isn't allowed to mess with these. */ 3364 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3365 EVBUFFER_LOCK(buffer); 3366 cb->flags |= flags; 3367 EVBUFFER_UNLOCK(buffer); 3368 return 0; 3369 } 3370 3371 int 3372 evbuffer_cb_clear_flags(struct evbuffer *buffer, 3373 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3374 { 3375 /* the user isn't allowed to mess with these.
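Stripping them from the caller's mask here means that even a caller passing ~0 cannot touch the internal bits.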
*/ 3376 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3377 EVBUFFER_LOCK(buffer); 3378 cb->flags &= ~flags; 3379 EVBUFFER_UNLOCK(buffer); 3380 return 0; 3381 } 3382 3383 int 3384 evbuffer_freeze(struct evbuffer *buffer, int start) 3385 { 3386 EVBUFFER_LOCK(buffer); 3387 if (start) 3388 buffer->freeze_start = 1; 3389 else 3390 buffer->freeze_end = 1; 3391 EVBUFFER_UNLOCK(buffer); 3392 return 0; 3393 } 3394 3395 int 3396 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3397 { 3398 EVBUFFER_LOCK(buffer); 3399 if (start) 3400 buffer->freeze_start = 0; 3401 else 3402 buffer->freeze_end = 0; 3403 EVBUFFER_UNLOCK(buffer); 3404 return 0; 3405 } 3406 3407 #if 0 3408 void 3409 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3410 { 3411 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3412 cb->size_before_suspend = evbuffer_get_length(buffer); 3413 cb->flags |= EVBUFFER_CB_SUSPENDED; 3414 } 3415 } 3416 3417 void 3418 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3419 { 3420 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3421 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3422 size_t sz = cb->size_before_suspend; 3423 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3424 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3425 cb->size_before_suspend = 0; 3426 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3427 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3428 } 3429 } 3430 } 3431 #endif 3432 3433 int 3434 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, 3435 int max_cbs) 3436 { 3437 int r = 0; 3438 EVBUFFER_LOCK(buffer); 3439 if (buffer->deferred_cbs) { 3440 if (max_cbs < 1) { 3441 r = -1; 3442 goto done; 3443 } 3444 cbs[0] = &buffer->deferred; 3445 r = 1; 3446 } 3447 done: 3448 EVBUFFER_UNLOCK(buffer); 3449 return r; 3450 } 3451
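/* Usage sketch for the evbuffer_peek() API implemented above: inspecting
 * pending data without copying it out of the buffer. A minimal
 * illustration, assuming a caller that already owns an evbuffer; the
 * helper name is hypothetical and error handling is omitted, so the
 * block is left disabled.
 */
#if 0
static void
dump_pending(struct evbuffer *buf)
{
	struct evbuffer_iovec v[4];
	int i, n;

	/* With len == -1, evbuffer_peek() fills at most four extents and
	 * returns the number it actually used. */
	n = evbuffer_peek(buf, -1, NULL, v, 4);
	for (i = 0; i < n; ++i)
		(void) fwrite(v[i].iov_base, 1, v[i].iov_len, stderr);
}
#endif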