/*
 * Copyright (c) 2002-2007 Niels Provos <[email protected]>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"

#ifdef WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef _EVENT_HAVE_VASPRINTF
/* If we have vasprintf, we need to define this before we include stdio.h. */
#define _GNU_SOURCE
#endif

#include <sys/types.h>

#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef _EVENT_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef _EVENT_HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef _EVENT_HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef _EVENT_HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "event2/event-config.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* send file support */
#if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

#ifdef USE_SENDFILE
static int use_sendfile = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
static int use_mmap = 1;
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	    0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
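
/* Added note (inferred from the two macros above, not in the original
 * source): layout of a chain's buffer.
 *
 *	buffer                                      buffer + buffer_len
 *	|<- misalign ->|<----- off ----->|<-- CHAIN_SPACE_LEN -->|
 *	               ^                 ^
 *	               start of data     CHAIN_SPACE_PTR
 *
 * 'misalign' counts bytes already drained from the front; 'off' is the
 * number of bytes of live data; the remainder is writable space, which
 * is 0 for immutable chains.
 */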

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);

#ifdef WIN32
static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
    ev_ssize_t howmuch);
#else
#define evbuffer_readfile evbuffer_read
#endif

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	to_alloc = MIN_BUFFER_SIZE;
	while (to_alloc < size)
		to_alloc <<= 1;

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	return (chain);
}

static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}

/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	TAILQ_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
_evbuffer_incref(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
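
/* Illustrative sketch (not part of the original source): with deferred
 * callbacks enabled, evbuffer callbacks run from the event loop instead
 * of from inside every add/drain.  'base' is assumed to be an existing
 * struct event_base; my_cb/my_arg are hypothetical.
 *
 *	struct evbuffer *buf = evbuffer_new();
 *	evbuffer_enable_locking(buf, NULL);
 *	evbuffer_defer_callbacks(buf, base);
 *	evbuffer_add_cb(buf, my_cb, my_arg);
 */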

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
		TAILQ_REMOVE(&buffer->callbacks, cbent, next);
		mm_free(cbent);
	}
}

void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}

static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
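
/* Illustrative sketch (not part of the original source): the intended
 * reserve/commit cycle for writing into an evbuffer without an extra
 * copy.  'produce_data' is a hypothetical generator that fills a region
 * and returns the number of bytes it wrote.
 *
 *	struct evbuffer_iovec v[2];
 *	int i, n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	if (n < 0)
 *		return;
 *	for (i = 0; i < n; ++i)
 *		v[i].iov_len = produce_data(v[i].iov_base, v[i].iov_len);
 *	evbuffer_commit_space(buf, v, n);
 */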

static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}

int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout(buf, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
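
/* Illustrative sketch (not part of the original source): the difference
 * between the two calls above.  evbuffer_copyout() peeks without
 * consuming; evbuffer_remove() copies and then drains.
 * 'hdr_looks_valid' is hypothetical.
 *
 *	char hdr[4];
 *	if (evbuffer_copyout(buf, hdr, 4) == 4 && hdr_looks_valid(hdr))
 *		evbuffer_remove(buf, hdr, 4);
 */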

/* reads data from the src buffer to the dst buffer, avoiding memcpy when
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
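
/* Illustrative sketch (not part of the original source): a common use of
 * evbuffer_pullup() is to linearize a fixed-size record header before
 * parsing it.  'struct my_header' and 'handle_header' are hypothetical.
 *
 *	struct my_header *h;
 *	if (evbuffer_get_length(buf) >= sizeof(*h)) {
 *		h = (struct my_header *)evbuffer_pullup(buf, sizeof(*h));
 *		if (h)
 *			handle_header(h);
 *	}
 */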

/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->_internal.chain;
	size_t i = ptr->_internal.pos_in_chain;

	if (!chain)
		return -1;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (!chain->next) {
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}

static inline char
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t off = it->_internal.pos_in_chain;

	return chain->buffer[chain->misalign + off];
}

struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
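
/* Illustrative sketch (not part of the original source): reading
 * CRLF-terminated lines, e.g. from an HTTP-style peer.  The returned
 * string is heap-allocated and must be freed by the caller;
 * 'handle_line' is hypothetical.
 *
 *	size_t len;
 *	char *line;
 *	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF))) {
 *		handle_line(line, len);
 *		free(line);
 *	}
 */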

#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
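
/* Illustrative sketch (not part of the original source): the chain-growth
 * policy above doubles the previous chain size up to
 * EVBUFFER_CHAIN_MAX_AUTO_SIZE/2, so a sequence of small appends
 * amortizes to a handful of allocations:
 *
 *	struct evbuffer *out = evbuffer_new();
 *	evbuffer_add(out, "HTTP/1.0 200 OK\r\n", 17);
 *	evbuffer_add_printf(out, "Content-Length: %d\r\n\r\n", 5);
 *	evbuffer_add(out, "hello", 5);
 */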

int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_start) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data. */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}

#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}

/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty.) */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}

/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}

		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef _EVENT_HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ	4096

/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}

static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}

/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		/* Use all NUM_READ_IOVEC entries; the buffer was expanded
		 * for that many chains above. */
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs,
		    NUM_READ_IOVEC, &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
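/* Illustrative usage sketch (hypothetical example names): draining a
 * readable socket into an evbuffer from a read callback.
 *
 *	int n = evbuffer_read(buf, fd, -1);	// read up to the kernel's count
 *	if (n > 0)
 *		process_input(buf);		// hypothetical consumer
 *	else if (n == 0)
 *		handle_eof(fd);			// peer closed the connection
 *	else
 *		handle_error(fd, errno);
 */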
/* Read up to 'howmuch' bytes from a file into 'buf' using the
 * reserve/commit API.  Used by the fallback path of evbuffer_add_file()
 * below, which calls it unconditionally, so it must be compiled on all
 * platforms, not only win32. */
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;

	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);

	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

#ifdef USE_IOVEC_IMPL
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length? if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the sendfile data via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXX could be problematic when windows supports mmap */
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXX could be problematic when windows supports mmap */
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
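/* For context: a single evbuffer_write_iovec() call covers at most
 * NUM_WRITE_IOVEC chains, so a heavily fragmented buffer needs several
 * write attempts to drain completely.  A caller sketch (hypothetical
 * example names):
 *
 *	while (evbuffer_get_length(out) > 0) {
 *		if (evbuffer_write(out, fd) <= 0)
 *			break;	// would-block or error; retry from the
 *				// event loop later
 *	}
 */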
#ifdef USE_SENDFILE
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR, return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif

int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}
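/* Illustrative usage sketch (hypothetical example names): a crude rate
 * limiter that never sends more than 'quota' bytes per tick, relying on
 * evbuffer_write_atmost() to cap the drain and leave the rest queued:
 *
 *	ev_ssize_t quota = 4096;
 *	int sent = evbuffer_write_atmost(out, fd, quota);
 *	if (sent > 0)
 *		quota -= sent;
 */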
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}

int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}

/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp does.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}

struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
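/* Illustrative usage sketch (hypothetical example names): finding each
 * CRLF in a buffer without linearizing it, using evbuffer_search() and an
 * evbuffer_ptr cursor:
 *
 *	struct evbuffer_ptr p = evbuffer_search(buf, "\r\n", 2, NULL);
 *	while (p.pos >= 0) {
 *		handle_line_ending_at(buf, p.pos);	// hypothetical
 *		if (evbuffer_ptr_set(buf, &p, 2, EVBUFFER_PTR_ADD) < 0)
 *			break;
 *		p = evbuffer_search(buf, "\r\n", 2, &p);
 *	}
 */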
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len;
		if (start_at) {
			len -= start_at->pos;
		}
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx < n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len < 0) {
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}

int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}

int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
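/* Illustrative usage sketch (hypothetical example names): queueing a blob
 * without copying it; the cleanup callback fires once the buffer no
 * longer references the memory:
 *
 *	static void
 *	blob_done(const void *data, size_t len, void *arg)
 *	{
 *		free((void *)data);	// or decrement a refcount, etc.
 *	}
 *	...
 *	evbuffer_add_reference(out, blob, blob_len, blob_done, NULL);
 */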
/* TODO(niels): maybe we don't want to own the fd, however, in that
 * case, we should dup it - dup is cheap.  Perhaps we should use a
 * callback instead?
 */
/* TODO(niels): we may want to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    ev_off_t offset, ev_off_t length)
{
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
#if defined(USE_SENDFILE)
	int sendfile_okay = 1;
#endif
	int ok = 1;

#if defined(USE_SENDFILE)
	if (use_sendfile) {
		EVBUFFER_LOCK(outbuf);
		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
		EVBUFFER_UNLOCK(outbuf);
	}

	if (use_sendfile && sendfile_okay) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		}

		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
		}
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, 0);
		/* Some mmap implementations require the offset to be a
		 * multiple of the page size.  Most users of this API are
		 * likely to use 0, so mapping everything is not likely to
		 * be a problem.
		 * TODO(niels): determine the page size and round the offset
		 * to it, to avoid mapping too much memory.
		 */
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
			return (-1);
		}
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			/* unmap the full length we mapped above */
			munmap(mapped, length + offset);
			return (-1);
		}

		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
		chain->buffer = mapped;
		chain->buffer_len = length + offset;
		chain->off = length + offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			info->fd = -1;
			evbuffer_chain_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;

			evbuffer_chain_insert(outbuf, chain);

			/* we need to subtract whatever we don't need */
			evbuffer_drain(outbuf, offset);
		}
	} else
#endif
	{
		/* the default implementation */
		struct evbuffer *tmp = evbuffer_new();
		ev_ssize_t read;

		if (tmp == NULL)
			return (-1);

#ifdef WIN32
#define lseek _lseeki64
#endif
		if (lseek(fd, offset, SEEK_SET) == -1) {
			evbuffer_free(tmp);
			return (-1);
		}

		/* we add everything to a temporary buffer, so that we
		 * can abort without side effects if the read fails. */
		while (length) {
			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
			/* Treat early EOF (read == 0 with bytes still
			 * expected) as an error too, so we cannot loop
			 * forever on a file shorter than 'length'. */
			if (read <= 0) {
				evbuffer_free(tmp);
				return (-1);
			}

			length -= read;
		}

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);

#ifdef WIN32
#define close _close
#endif
			close(fd);
		}
	}

	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);

	return ok ? 0 : -1;
}
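/* Illustrative usage sketch (hypothetical example names): serving a file
 * through an evbuffer that drains straight to a socket, which is what
 * lets the sendfile() path above kick in:
 *
 *	int fd = open("index.html", O_RDONLY);
 *	evbuffer_set_flags(out, EVBUFFER_FLAG_DRAINS_TO_FD);
 *	if (evbuffer_add_file(out, fd, 0, file_size) < 0)
 *		close(fd);	// on failure, ownership stays with us;
 *				// on success, the evbuffer owns the fd
 */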
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		if (ent) {	/* evbuffer_add_cb() can fail on OOM */
			ent->cb.cb_obsolete = cb;
			ent->flags |= EVBUFFER_CB_OBSOLETE;
		}
	}
	EVBUFFER_UNLOCK(buffer);
}

struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}

int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
    struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}

int
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *cbent;
	int result = -1;
	EVBUFFER_LOCK(buffer);
	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
			result = evbuffer_remove_cb_entry(buffer, cbent);
			goto done;
		}
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return result;
}

int
evbuffer_cb_set_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags |= flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_cb_clear_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags &= ~flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
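/* Illustrative usage sketch (hypothetical example names): logging every
 * change to a buffer's length via a callback entry:
 *
 *	static void
 *	log_cb(struct evbuffer *b, const struct evbuffer_cb_info *info,
 *	    void *arg)
 *	{
 *		printf("added %zu, drained %zu\n",
 *		    info->n_added, info->n_deleted);
 *	}
 *	...
 *	struct evbuffer_cb_entry *ent = evbuffer_add_cb(buf, log_cb, NULL);
 *	...
 *	evbuffer_remove_cb_entry(buf, ent);
 */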
int
evbuffer_freeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 1;
	else
		buffer->freeze_end = 1;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_unfreeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 0;
	else
		buffer->freeze_end = 0;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

#if 0
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif

/* These hooks are exposed so that the unit tests can temporarily disable
 * sendfile support in order to test mmap, or both to test linear
 * access.  Don't use them; if we need to add a way to disable sendfile
 * support in the future, it will probably be via an alternate version of
 * evbuffer_add_file() with a 'flags' argument.
 */
int _evbuffer_testing_use_sendfile(void);
int _evbuffer_testing_use_mmap(void);
int _evbuffer_testing_use_linear_file_access(void);

int
_evbuffer_testing_use_sendfile(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	ok = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return ok;
}
int
_evbuffer_testing_use_mmap(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	ok = 1;
#endif
	return ok;
}
int
_evbuffer_testing_use_linear_file_access(void)
{
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}