1 /* 2 * Copyright (c) 2002-2007 Niels Provos <[email protected]> 3 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include "event2/event-config.h" 29 #include "evconfig-private.h" 30 31 #ifdef _WIN32 32 #include <winsock2.h> 33 #include <windows.h> 34 #include <io.h> 35 #endif 36 37 #ifdef _EVENT_HAVE_VASPRINTF 38 /* If we have vasprintf, we need to define _GNU_SOURCE before we include 39 * stdio.h. This comes from evconfig-private.h. 
40 */ 41 #endif 42 43 #include <sys/types.h> 44 45 #ifdef _EVENT_HAVE_SYS_TIME_H 46 #include <sys/time.h> 47 #endif 48 49 #ifdef _EVENT_HAVE_SYS_SOCKET_H 50 #include <sys/socket.h> 51 #endif 52 53 #ifdef _EVENT_HAVE_SYS_UIO_H 54 #include <sys/uio.h> 55 #endif 56 57 #ifdef _EVENT_HAVE_SYS_IOCTL_H 58 #include <sys/ioctl.h> 59 #endif 60 61 #ifdef _EVENT_HAVE_SYS_MMAN_H 62 #include <sys/mman.h> 63 #endif 64 65 #ifdef _EVENT_HAVE_SYS_SENDFILE_H 66 #include <sys/sendfile.h> 67 #endif 68 #ifdef _EVENT_HAVE_SYS_STAT_H 69 #include <sys/stat.h> 70 #endif 71 72 73 #include <errno.h> 74 #include <stdio.h> 75 #include <stdlib.h> 76 #include <string.h> 77 #ifdef _EVENT_HAVE_STDARG_H 78 #include <stdarg.h> 79 #endif 80 #ifdef _EVENT_HAVE_UNISTD_H 81 #include <unistd.h> 82 #endif 83 #include <limits.h> 84 85 #include "event2/event.h" 86 #include "event2/buffer.h" 87 #include "event2/buffer_compat.h" 88 #include "event2/bufferevent.h" 89 #include "event2/bufferevent_compat.h" 90 #include "event2/bufferevent_struct.h" 91 #include "event2/thread.h" 92 #include "log-internal.h" 93 #include "mm-internal.h" 94 #include "util-internal.h" 95 #include "evthread-internal.h" 96 #include "evbuffer-internal.h" 97 #include "bufferevent-internal.h" 98 99 /* some systems do not have MAP_FAILED */ 100 #ifndef MAP_FAILED 101 #define MAP_FAILED ((void *)-1) 102 #endif 103 104 /* send file support */ 105 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__) 106 #define USE_SENDFILE 1 107 #define SENDFILE_IS_LINUX 1 108 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__) 109 #define USE_SENDFILE 1 110 #define SENDFILE_IS_FREEBSD 1 111 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__) 112 #define USE_SENDFILE 1 113 #define SENDFILE_IS_MACOSX 1 114 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) 115 #define USE_SENDFILE 1 116 #define SENDFILE_IS_SOLARIS 1 117 #endif 118 119 /* Mask of user-selectable callback flags. 
 */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	       0x00040000

/* evbuffer_chain support */
/* First writable byte in a chain's buffer. */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
/* Free bytes at the end of a chain's buffer (0 if the chain is immutable). */
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);

/* Allocate a chain whose buffer can hold at least 'size' bytes.  The
 * chain header and its buffer are carved from a single allocation,
 * rounded up to a power of two.
 * NOTE(review): 'size += EVBUFFER_CHAIN_SIZE' and the doubling loop are
 * not checked for overflow; a huge 'size' could wrap or spin — upstream
 * later added an explicit cap.  Confirm callers bound 'size'. */
static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	to_alloc = MIN_BUFFER_SIZE;
	while (to_alloc < size)
		to_alloc <<= 1;

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	return (chain);
}

/* Release one chain.  A pinned chain is only marked EVBUFFER_DANGLING so
 * the unpin path frees it later; reference chains run their cleanup
 * callback and file-segment chains release the segment first. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			struct evbuffer_chain_reference,
			chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			struct evbuffer_chain_file_segment,
			chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->type == EVBUF_FS_MMAP)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}

	mm_free(chain);
}

/* Free every chain in the singly linked list starting at 'chain'. */
static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
/* Return true iff no chain from 'chain' onward holds any data. */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Append 'chain' to 'buf', first discarding any trailing empty,
 * unpinned chains.  Caller must hold the buffer lock. */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all.
		 */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

/* Allocate a chain big enough for 'datlen' bytes and append it. */
static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

/* Set a pin flag on a chain; asserts it was not already set. */
void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

/* Clear a pin flag; frees the chain if it was freed while pinned
 * (EVBUFFER_DANGLING set by evbuffer_chain_free). */
void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

/* Allocate and initialize an empty evbuffer with refcount 1. */
struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	TAILQ_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

void
_evbuffer_incref(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

/* Like _evbuffer_incref, but returns with the buffer lock still held. */
void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

/* Route this buffer's callbacks through 'base's deferred-callback queue
 * instead of running them inline. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

/* Attach a lock to the buffer.  If 'lock' is NULL, allocate a recursive
 * lock that we own (and will free); otherwise borrow the caller's.
 * Fails if a lock is already set or threads are compiled out. */
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

/* Run the callbacks whose flags match the mode selected by
 * 'running_deferred' (deferred pass vs. immediate/NODEFER pass), then
 * reset the pending add/del counters unless deferred callbacks still
 * need them.  Caller must hold the buffer lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them.
		 */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

/* Notify callbacks of a buffer change.  With deferred callbacks
 * enabled, take a reference (on the buffer and its parent bufferevent)
 * and schedule the deferred entry; NODEFER callbacks still run
 * immediately via the final evbuffer_run_callbacks call.  The lock is
 * recursive, so the incref+unlock pair leaves the caller's hold
 * intact. */
static inline void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}

/* Deferred-queue trampoline: runs the deferred pass and drops the
 * references taken in evbuffer_invoke_callbacks. */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}

/* Unlink and free every callback entry on the buffer. */
static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
		TAILQ_REMOVE(&buffer->callbacks, cbent, next);
		mm_free(cbent);
	}
}

/* Drop one reference; on the last one free chains, callbacks, any
 * pending deferred entry, the owned lock, and the buffer itself.
 * Always releases the lock before returning. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

/* Return how many bytes are contiguously available at the front of the
 * buffer (i.e. the first chain's data length). */
size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ?
	    chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

/* Reserve at least 'size' bytes of write space, described by up to
 * 'n_vecs' iovecs.  Returns the number of vecs used, or -1 on failure
 * or if appending is frozen.  Space must later be committed with
 * evbuffer_commit_space. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}

/* Move last_with_datap forward past every chain that holds data;
 * returns the number of chains skipped. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}

/* Commit space previously handed out by evbuffer_reserve_space.  The
 * vecs must match the reserved chains in order and must not exceed the
 * available space.  Returns 0 on success, -1 on error. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it.
		 */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* True iff the last chain of 'buf' is pinned for reading. */
static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

/* Reset a buffer's chain pointers to the empty state. */
static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains. The first pinned chain is saved in first, and the
 * last in last. If src has no read-pinned chains, first and last are set
 * to NULL.
 */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

/* Reattach the pinned chains saved by PRESERVE_PINNED to a src buffer
 * that has since been emptied; with no pinned chains, just zero it. */
static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

/* Make dst refer to exactly the chains of src; dst's own chains (if
 * any) must already have been freed by the caller. */
static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

/* Append src's chain list to a nonempty dst, fixing up
 * last_with_datap when src's pointer referred to its own 'first'. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

/* Prepend src's chain list to dst, adjusting last_with_datap for the
 * cases where dst was empty or its data started at its first chain. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}

/* Move all data from inbuf to the end of outbuf without copying
 * (except for read-pinned chains).  Returns 0 on success, -1 if a
 * freeze or allocation failure prevents the move. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it.
		 */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Move all data from inbuf to the front of outbuf; mirror image of
 * evbuffer_add_buffer. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it.
		 */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

/* Discard the first 'len' bytes of the buffer (clamped to the buffer's
 * length).  Whole chains are freed; a partially drained chain just has
 * its misalign advanced.  Read-pinned chains are emptied but kept.
 * Returns 0 on success, -1 if the buffer front is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout(buf, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

/* Copy up to 'datlen' bytes from the front of the buffer into
 * 'data_out' without draining them.  Returns the number of bytes
 * copied, or -1 if the buffer front is frozen. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case.
	 */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			dst->last->next = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

/* Linearize the first 'size' bytes of the buffer (all of it if size is
 * negative) and return a pointer to them, or NULL if size exceeds the
 * buffer length, a needed chain is pinned, or allocation fails. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk.
			 */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or
'\n'. 1212 * The returned buffer needs to be freed by the called. 1213 */ 1214 char * 1215 evbuffer_readline(struct evbuffer *buffer) 1216 { 1217 return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); 1218 } 1219 1220 static inline ev_ssize_t 1221 evbuffer_strchr(struct evbuffer_ptr *it, const char chr) 1222 { 1223 struct evbuffer_chain *chain = it->_internal.chain; 1224 size_t i = it->_internal.pos_in_chain; 1225 while (chain != NULL) { 1226 char *buffer = (char *)chain->buffer + chain->misalign; 1227 char *cp = memchr(buffer+i, chr, chain->off-i); 1228 if (cp) { 1229 it->_internal.chain = chain; 1230 it->_internal.pos_in_chain = cp - buffer; 1231 it->pos += (cp - buffer); 1232 return it->pos; 1233 } 1234 it->pos += chain->off - i; 1235 i = 0; 1236 chain = chain->next; 1237 } 1238 1239 return (-1); 1240 } 1241 1242 static inline char * 1243 find_eol_char(char *s, size_t len) 1244 { 1245 #define CHUNK_SZ 128 1246 /* Lots of benchmarking found this approach to be faster in practice 1247 * than doing two memchrs over the whole buffer, doin a memchr on each 1248 * char of the buffer, or trying to emulate memchr by hand. */ 1249 char *s_end, *cr, *lf; 1250 s_end = s+len; 1251 while (s < s_end) { 1252 size_t chunk = (s + CHUNK_SZ < s_end) ? 
CHUNK_SZ : (s_end - s); 1253 cr = memchr(s, '\r', chunk); 1254 lf = memchr(s, '\n', chunk); 1255 if (cr) { 1256 if (lf && lf < cr) 1257 return lf; 1258 return cr; 1259 } else if (lf) { 1260 return lf; 1261 } 1262 s += CHUNK_SZ; 1263 } 1264 1265 return NULL; 1266 #undef CHUNK_SZ 1267 } 1268 1269 static ev_ssize_t 1270 evbuffer_find_eol_char(struct evbuffer_ptr *it) 1271 { 1272 struct evbuffer_chain *chain = it->_internal.chain; 1273 size_t i = it->_internal.pos_in_chain; 1274 while (chain != NULL) { 1275 char *buffer = (char *)chain->buffer + chain->misalign; 1276 char *cp = find_eol_char(buffer+i, chain->off-i); 1277 if (cp) { 1278 it->_internal.chain = chain; 1279 it->_internal.pos_in_chain = cp - buffer; 1280 it->pos += (cp - buffer) - i; 1281 return it->pos; 1282 } 1283 it->pos += chain->off - i; 1284 i = 0; 1285 chain = chain->next; 1286 } 1287 1288 return (-1); 1289 } 1290 1291 static inline int 1292 evbuffer_strspn( 1293 struct evbuffer_ptr *ptr, const char *chrset) 1294 { 1295 int count = 0; 1296 struct evbuffer_chain *chain = ptr->_internal.chain; 1297 size_t i = ptr->_internal.pos_in_chain; 1298 1299 if (!chain) 1300 return -1; 1301 1302 while (1) { 1303 char *buffer = (char *)chain->buffer + chain->misalign; 1304 for (; i < chain->off; ++i) { 1305 const char *p = chrset; 1306 while (*p) { 1307 if (buffer[i] == *p++) 1308 goto next; 1309 } 1310 ptr->_internal.chain = chain; 1311 ptr->_internal.pos_in_chain = i; 1312 ptr->pos += count; 1313 return count; 1314 next: 1315 ++count; 1316 } 1317 i = 0; 1318 1319 if (! 
chain->next) {
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}


/* Return the byte 'it' points at.  The caller must ensure 'it' refers to a
 * valid position (non-NULL chain, offset within the chain). */
static inline char
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t off = it->_internal.pos_in_chain;

	return chain->buffer[chain->misalign + off];
}

/* Locate the next end-of-line marker at or after 'start' (or the front of
 * the buffer), per 'eol_style'.  On failure the returned ptr has pos == -1.
 * '*eol_len_out' receives how many EOL bytes should be drained. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards.
	 */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		/* Drain the entire run of consecutive CR/LF bytes. */
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* Accept either a bare LF or a CRLF pair. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				/* Lone CR: skip it and keep scanning. */
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

/* Remove and return one line (NUL-terminated, EOL stripped) from the front
 * of 'buffer', or NULL if no complete line is present.  '*n_read_out'
 * receives the line length, excluding the terminator. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	/* Cannot remove data while the front of the buffer is frozen. */
	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	/* Drop the EOL bytes themselves. */
	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ?
n_to_copy : 0;

	return result;
}

/* Chains allocated by evbuffer_add grow geometrically up to this size. */
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	/* Cannot append while the end of the buffer is frozen. */
	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	/* Double the chain size until we hit the auto-size cap. */
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* Fill whatever room remains in the old last chain first. */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Insert 'datlen' bytes from 'data' at the FRONT of 'buf'. */
int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	/* Cannot prepend while the front of the buffer is frozen. */
	if (buf->freeze_start) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data.
*/ 1582 memcpy(chain->buffer, 1583 (char*)data + datlen - chain->misalign, 1584 (size_t)chain->misalign); 1585 chain->off += (size_t)chain->misalign; 1586 buf->total_len += (size_t)chain->misalign; 1587 buf->n_add_for_cb += (size_t)chain->misalign; 1588 datlen -= (size_t)chain->misalign; 1589 chain->misalign = 0; 1590 } 1591 } 1592 1593 /* we need to add another chain */ 1594 if ((tmp = evbuffer_chain_new(datlen)) == NULL) 1595 goto done; 1596 buf->first = tmp; 1597 if (buf->last_with_datap == &buf->first) 1598 buf->last_with_datap = &tmp->next; 1599 1600 tmp->next = chain; 1601 1602 tmp->off = datlen; 1603 tmp->misalign = tmp->buffer_len - datlen; 1604 1605 memcpy(tmp->buffer + tmp->misalign, data, datlen); 1606 buf->total_len += datlen; 1607 buf->n_add_for_cb += (size_t)chain->misalign; 1608 1609 out: 1610 evbuffer_invoke_callbacks(buf); 1611 result = 0; 1612 done: 1613 EVBUFFER_UNLOCK(buf); 1614 return result; 1615 } 1616 1617 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ 1618 static void 1619 evbuffer_chain_align(struct evbuffer_chain *chain) 1620 { 1621 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); 1622 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); 1623 memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); 1624 chain->misalign = 0; 1625 } 1626 1627 #define MAX_TO_COPY_IN_EXPAND 4096 1628 #define MAX_TO_REALIGN_IN_EXPAND 2048 1629 1630 /** Helper: return true iff we should realign chain to fit datalen bytes of 1631 data in it. */ 1632 static int 1633 evbuffer_chain_should_realign(struct evbuffer_chain *chain, 1634 size_t datlen) 1635 { 1636 return chain->buffer_len - chain->off >= datlen && 1637 (chain->off < chain->buffer_len / 2) && 1638 (chain->off <= MAX_TO_REALIGN_IN_EXPAND); 1639 } 1640 1641 /* Expands the available space in the event buffer to at least datlen, all in 1642 * a single chunk. Return that chunk. 
*/ 1643 static struct evbuffer_chain * 1644 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) 1645 { 1646 struct evbuffer_chain *chain, **chainp; 1647 struct evbuffer_chain *result = NULL; 1648 ASSERT_EVBUFFER_LOCKED(buf); 1649 1650 chainp = buf->last_with_datap; 1651 1652 /* XXX If *chainp is no longer writeable, but has enough space in its 1653 * misalign, this might be a bad idea: we could still use *chainp, not 1654 * (*chainp)->next. */ 1655 if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) 1656 chainp = &(*chainp)->next; 1657 1658 /* 'chain' now points to the first chain with writable space (if any) 1659 * We will either use it, realign it, replace it, or resize it. */ 1660 chain = *chainp; 1661 1662 if (chain == NULL || 1663 (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { 1664 /* We can't use the last_with_data chain at all. Just add a 1665 * new one that's big enough. */ 1666 goto insert_new; 1667 } 1668 1669 /* If we can fit all the data, then we don't have to do anything */ 1670 if (CHAIN_SPACE_LEN(chain) >= datlen) { 1671 result = chain; 1672 goto ok; 1673 } 1674 1675 /* If the chain is completely empty, just replace it by adding a new 1676 * empty chain. */ 1677 if (chain->off == 0) { 1678 goto insert_new; 1679 } 1680 1681 /* If the misalignment plus the remaining space fulfills our data 1682 * needs, we could just force an alignment to happen. Afterwards, we 1683 * have enough space. But only do this if we're saving a lot of space 1684 * and not moving too much data. Otherwise the space savings are 1685 * probably offset by the time lost in copying. 1686 */ 1687 if (evbuffer_chain_should_realign(chain, datlen)) { 1688 evbuffer_chain_align(chain); 1689 result = chain; 1690 goto ok; 1691 } 1692 1693 /* At this point, we can either resize the last chunk with space in 1694 * it, use the next chunk after it, or If we add a new chunk, we waste 1695 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. 
If we 1696 * resize, we have to copy chain->off bytes. 1697 */ 1698 1699 /* Would expanding this chunk be affordable and worthwhile? */ 1700 if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || 1701 chain->off > MAX_TO_COPY_IN_EXPAND) { 1702 /* It's not worth resizing this chain. Can the next one be 1703 * used? */ 1704 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { 1705 /* Yes, we can just use the next chain (which should 1706 * be empty. */ 1707 result = chain->next; 1708 goto ok; 1709 } else { 1710 /* No; append a new chain (which will free all 1711 * terminal empty chains.) */ 1712 goto insert_new; 1713 } 1714 } else { 1715 /* Okay, we're going to try to resize this chain: Not doing so 1716 * would waste at least 1/8 of its current allocation, and we 1717 * can do so without having to copy more than 1718 * MAX_TO_COPY_IN_EXPAND bytes. */ 1719 /* figure out how much space we need */ 1720 size_t length = chain->off + datlen; 1721 struct evbuffer_chain *tmp = evbuffer_chain_new(length); 1722 if (tmp == NULL) 1723 goto err; 1724 1725 /* copy the data over that we had so far */ 1726 tmp->off = chain->off; 1727 memcpy(tmp->buffer, chain->buffer + chain->misalign, 1728 chain->off); 1729 /* fix up the list */ 1730 EVUTIL_ASSERT(*chainp == chain); 1731 result = *chainp = tmp; 1732 1733 if (buf->last == chain) 1734 buf->last = tmp; 1735 1736 tmp->next = chain->next; 1737 evbuffer_chain_free(chain); 1738 goto ok; 1739 } 1740 1741 insert_new: 1742 result = evbuffer_chain_insert_new(buf, datlen); 1743 if (!result) 1744 goto err; 1745 ok: 1746 EVUTIL_ASSERT(result); 1747 EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); 1748 err: 1749 return result; 1750 } 1751 1752 /* Make sure that datlen bytes are available for writing in the last n 1753 * chains. Never copies or moves data. 
*/ 1754 int 1755 _evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n) 1756 { 1757 struct evbuffer_chain *chain = buf->last, *tmp, *next; 1758 size_t avail; 1759 int used; 1760 1761 ASSERT_EVBUFFER_LOCKED(buf); 1762 EVUTIL_ASSERT(n >= 2); 1763 1764 if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { 1765 /* There is no last chunk, or we can't touch the last chunk. 1766 * Just add a new chunk. */ 1767 chain = evbuffer_chain_new(datlen); 1768 if (chain == NULL) 1769 return (-1); 1770 1771 evbuffer_chain_insert(buf, chain); 1772 return (0); 1773 } 1774 1775 used = 0; /* number of chains we're using space in. */ 1776 avail = 0; /* how much space they have. */ 1777 /* How many bytes can we stick at the end of buffer as it is? Iterate 1778 * over the chains at the end of the buffer, tring to see how much 1779 * space we have in the first n. */ 1780 for (chain = *buf->last_with_datap; chain; chain = chain->next) { 1781 if (chain->off) { 1782 size_t space = (size_t) CHAIN_SPACE_LEN(chain); 1783 EVUTIL_ASSERT(chain == *buf->last_with_datap); 1784 if (space) { 1785 avail += space; 1786 ++used; 1787 } 1788 } else { 1789 /* No data in chain; realign it. */ 1790 chain->misalign = 0; 1791 avail += chain->buffer_len; 1792 ++used; 1793 } 1794 if (avail >= datlen) { 1795 /* There is already enough space. Just return */ 1796 return (0); 1797 } 1798 if (used == n) 1799 break; 1800 } 1801 1802 /* There wasn't enough space in the first n chains with space in 1803 * them. Either add a new chain with enough space, or replace all 1804 * empty chains with one that has enough space, depending on n. */ 1805 if (used < n) { 1806 /* The loop ran off the end of the chains before it hit n 1807 * chains; we can add another. 
 */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain.  But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Out of memory: restore a consistent chain list
			 * before failing. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}

/* Guarantee room for 'datlen' contiguous writable bytes in 'buf'. */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}

/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(_WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef _EVENT_HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif
#endif
#define NUM_READ_IOVEC 4

#define EVBUFFER_MAX_READ 4096

/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
1924 */ 1925 int 1926 _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch, 1927 struct evbuffer_iovec *vecs, int n_vecs_avail, 1928 struct evbuffer_chain ***chainp, int exact) 1929 { 1930 struct evbuffer_chain *chain; 1931 struct evbuffer_chain **firstchainp; 1932 size_t so_far; 1933 int i; 1934 ASSERT_EVBUFFER_LOCKED(buf); 1935 1936 if (howmuch < 0) 1937 return -1; 1938 1939 so_far = 0; 1940 /* Let firstchain be the first chain with any space on it */ 1941 firstchainp = buf->last_with_datap; 1942 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 1943 firstchainp = &(*firstchainp)->next; 1944 } 1945 1946 chain = *firstchainp; 1947 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 1948 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 1949 if (avail > (howmuch - so_far) && exact) 1950 avail = howmuch - so_far; 1951 vecs[i].iov_base = CHAIN_SPACE_PTR(chain); 1952 vecs[i].iov_len = avail; 1953 so_far += avail; 1954 chain = chain->next; 1955 } 1956 1957 *chainp = firstchainp; 1958 return i; 1959 } 1960 1961 static int 1962 get_n_bytes_readable_on_socket(evutil_socket_t fd) 1963 { 1964 #if defined(FIONREAD) && defined(_WIN32) 1965 unsigned long lng = EVBUFFER_MAX_READ; 1966 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 1967 return -1; 1968 return (int)lng; 1969 #elif defined(FIONREAD) 1970 int n = EVBUFFER_MAX_READ; 1971 if (ioctl(fd, FIONREAD, &n) < 0) 1972 return -1; 1973 return n; 1974 #else 1975 return EVBUFFER_MAX_READ; 1976 #endif 1977 } 1978 1979 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 1980 * as howmuch? 
*/ 1981 int 1982 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 1983 { 1984 struct evbuffer_chain **chainp; 1985 int n; 1986 int result; 1987 1988 #ifdef USE_IOVEC_IMPL 1989 int nvecs, i, remaining; 1990 #else 1991 struct evbuffer_chain *chain; 1992 unsigned char *p; 1993 #endif 1994 1995 EVBUFFER_LOCK(buf); 1996 1997 if (buf->freeze_end) { 1998 result = -1; 1999 goto done; 2000 } 2001 2002 n = get_n_bytes_readable_on_socket(fd); 2003 if (n <= 0 || n > EVBUFFER_MAX_READ) 2004 n = EVBUFFER_MAX_READ; 2005 if (howmuch < 0 || howmuch > n) 2006 howmuch = n; 2007 2008 #ifdef USE_IOVEC_IMPL 2009 /* Since we can use iovecs, we're willing to use the last 2010 * NUM_READ_IOVEC chains. */ 2011 if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) { 2012 result = -1; 2013 goto done; 2014 } else { 2015 IOV_TYPE vecs[NUM_READ_IOVEC]; 2016 #ifdef _EVBUFFER_IOVEC_IS_NATIVE 2017 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs, 2018 NUM_READ_IOVEC, &chainp, 1); 2019 #else 2020 /* We aren't using the native struct iovec. Therefore, 2021 we are on win32. */ 2022 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2023 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2, 2024 &chainp, 1); 2025 2026 for (i=0; i < nvecs; ++i) 2027 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2028 #endif 2029 2030 #ifdef _WIN32 2031 { 2032 DWORD bytesRead; 2033 DWORD flags=0; 2034 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2035 /* The read failed. It might be a close, 2036 * or it might be an error. */ 2037 if (WSAGetLastError() == WSAECONNABORTED) 2038 n = 0; 2039 else 2040 n = -1; 2041 } else 2042 n = bytesRead; 2043 } 2044 #else 2045 n = readv(fd, vecs, nvecs); 2046 #endif 2047 } 2048 2049 #else /*!USE_IOVEC_IMPL*/ 2050 /* If we don't have FIONREAD, we might waste some space here */ 2051 /* XXX we _will_ waste some space here if there is any space left 2052 * over on buf->last. 
 */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef _WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Walk the chains we read into, committing the 'n' bytes received
	 * across them. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

#ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes from the front of 'buffer' to 'fd' with one
 * scatter-gather call; returns the writev/WSASend result. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
#ifdef _WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif

#ifdef USE_SENDFILE
/* Write the leading sendfile chain of 'buffer' to 'dest_fd' using the
 * platform's sendfile(); returns bytes written, 0 for retriable errors on
 * Linux/Solaris, or -1 on hard errors. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_file_segment *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
		chain);
	const int source_fd = info->segment->fd;
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	ev_off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	ev_off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(dest_fd, source_fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	res = sendfile(dest_fd, source_fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#endif
}
#endif

/* Write up to 'howmuch' bytes (or everything, if negative) from 'buffer'
 * to 'fd', draining what was actually written. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	/* Cannot drain while the front of the buffer is frozen. */
	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(_WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove what we actually managed to write from the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}

/* Search for 'what' in 'buffer'; if found, linearize through the match and
 * return a pointer to it, else NULL. */
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		/* Pull the match into one flat region, then point at it. */
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}

/* Set or advance 'pos' by 'position' bytes within 'buf'; on running past
 * the end, the pointer is invalidated (pos->pos == -1). */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Walk forward until the remaining offset fits in one chain. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Ran off the end of the buffer: mark invalid. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}

/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	/* Not enough bytes left in the buffer: treat as "less than". */
	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	/* Compare chunk by chunk, since the buffer is not contiguous. */
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}

struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

/* Find 'what' within [start, end) of 'buffer'; the returned ptr has
 * pos == -1 if no match lies entirely before 'end'. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	/* Scan each chain for the first byte of 'what'; on a hit, run a
	 * full cross-chain comparison at that position. */
	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* A match that runs past 'end' doesn't count. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}

/* Fill up to 'n_vec' iovecs with the extents of 'buffer' starting at
 * 'start_at'; returns the number of iovecs that would be needed to cover
 * 'len' bytes (which may exceed n_vec). */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* The first iovec is the remainder of the chain holding
		 * 'start_at'. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0)
			break;
		/* Keep counting past n_vec so the caller learns how many
		 * iovecs would be required. */
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}


/* vprintf-style append: format into the free space of the last chain,
 * growing and retrying when the formatted text doesn't fit. */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Copy the arg list: vsnprintf consumes it and we may retry. */
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* The formatted text fit; commit it. */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Truncated: grow to sz+1 bytes and format again. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
2537 { 2538 int res = -1; 2539 va_list ap; 2540 2541 va_start(ap, fmt); 2542 res = evbuffer_add_vprintf(buf, fmt, ap); 2543 va_end(ap); 2544 2545 return (res); 2546 } 2547 2548 int 2549 evbuffer_add_reference(struct evbuffer *outbuf, 2550 const void *data, size_t datlen, 2551 evbuffer_ref_cleanup_cb cleanupfn, void *extra) 2552 { 2553 struct evbuffer_chain *chain; 2554 struct evbuffer_chain_reference *info; 2555 int result = -1; 2556 2557 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); 2558 if (!chain) 2559 return (-1); 2560 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; 2561 chain->buffer = (u_char *)data; 2562 chain->buffer_len = datlen; 2563 chain->off = datlen; 2564 2565 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); 2566 info->cleanupfn = cleanupfn; 2567 info->extra = extra; 2568 2569 EVBUFFER_LOCK(outbuf); 2570 if (outbuf->freeze_end) { 2571 /* don't call chain_free; we do not want to actually invoke 2572 * the cleanup function */ 2573 mm_free(chain); 2574 goto done; 2575 } 2576 evbuffer_chain_insert(outbuf, chain); 2577 outbuf->n_add_for_cb += datlen; 2578 2579 evbuffer_invoke_callbacks(outbuf); 2580 2581 result = 0; 2582 done: 2583 EVBUFFER_UNLOCK(outbuf); 2584 2585 return result; 2586 } 2587 2588 /* TODO(niels): we may want to add to automagically convert to mmap, in 2589 * case evbuffer_remove() or evbuffer_pullup() are being used. 
2590 */ 2591 struct evbuffer_file_segment * 2592 evbuffer_file_segment_new( 2593 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2594 { 2595 struct evbuffer_file_segment *seg = 2596 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2597 if (!seg) 2598 return NULL; 2599 seg->refcnt = 1; 2600 seg->fd = fd; 2601 seg->flags = flags; 2602 2603 #ifdef _WIN32 2604 #define lseek _lseeki64 2605 #define fstat _fstat 2606 #define stat _stat 2607 #endif 2608 if (length == -1) { 2609 struct stat st; 2610 if (fstat(fd, &st) < 0) 2611 goto err; 2612 length = st.st_size; 2613 } 2614 seg->length = length; 2615 2616 #if defined(USE_SENDFILE) 2617 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 2618 seg->offset = offset; 2619 seg->type = EVBUF_FS_SENDFILE; 2620 goto done; 2621 } 2622 #endif 2623 #if defined(_EVENT_HAVE_MMAP) 2624 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 2625 off_t offset_rounded = 0, offset_leftover = 0; 2626 void *mapped; 2627 if (offset) { 2628 /* mmap implementations don't generally like us 2629 * to have an offset that isn't a round */ 2630 #ifdef SC_PAGE_SIZE 2631 long page_size = sysconf(SC_PAGE_SIZE); 2632 #elif defined(_SC_PAGE_SIZE) 2633 long page_size = sysconf(_SC_PAGE_SIZE); 2634 #else 2635 long page_size = 1; 2636 #endif 2637 if (page_size == -1) 2638 goto err; 2639 offset_leftover = offset % page_size; 2640 offset_rounded = offset - offset_leftover; 2641 } 2642 mapped = mmap(NULL, length + offset_leftover, 2643 PROT_READ, 2644 #ifdef MAP_NOCACHE 2645 MAP_NOCACHE | /* ??? 
*/ 2646 #endif 2647 #ifdef MAP_FILE 2648 MAP_FILE | 2649 #endif 2650 MAP_PRIVATE, 2651 fd, offset_rounded); 2652 if (mapped == MAP_FAILED) { 2653 event_warn("%s: mmap(%d, %d, %zu) failed", 2654 __func__, fd, 0, (size_t)(offset + length)); 2655 } else { 2656 seg->mapping = mapped; 2657 seg->contents = (char*)mapped+offset_leftover; 2658 seg->offset = 0; 2659 seg->type = EVBUF_FS_MMAP; 2660 goto done; 2661 } 2662 } 2663 #endif 2664 #ifdef _WIN32 2665 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 2666 long h = (long)_get_osfhandle(fd); 2667 HANDLE m; 2668 ev_uint64_t total_size = length+offset; 2669 if (h == (long)INVALID_HANDLE_VALUE) 2670 return NULL; 2671 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, 2672 (total_size >> 32), total_size & 0xfffffffful, 2673 NULL); 2674 if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */ 2675 seg->mapping_handle = m; 2676 seg->offset = offset; 2677 seg->type = EVBUF_FS_MMAP; 2678 goto done; 2679 } 2680 } 2681 #endif 2682 2683 { 2684 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; 2685 ev_off_t read_so_far = 0; 2686 char *mem; 2687 int e; 2688 ev_ssize_t n = 0; 2689 if (!(mem = mm_malloc(length))) 2690 goto err; 2691 if (start_pos < 0) { 2692 mm_free(mem); 2693 goto err; 2694 } 2695 if (lseek(fd, offset, SEEK_SET) < 0) { 2696 mm_free(mem); 2697 goto err; 2698 } 2699 while (read_so_far < length) { 2700 n = read(fd, mem+read_so_far, length-read_so_far); 2701 if (n <= 0) 2702 break; 2703 read_so_far += n; 2704 } 2705 2706 e = errno; 2707 pos = lseek(fd, start_pos, SEEK_SET); 2708 if (n < 0 || (n == 0 && length > read_so_far)) { 2709 mm_free(mem); 2710 errno = e; 2711 goto err; 2712 } else if (pos < 0) { 2713 mm_free(mem); 2714 goto err; 2715 } 2716 2717 seg->contents = mem; 2718 seg->type = EVBUF_FS_IO; 2719 } 2720 2721 done: 2722 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 2723 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 2724 } 2725 return seg; 2726 err: 2727 mm_free(seg); 2728 return NULL; 2729 } 2730 2731 void 2732 
evbuffer_file_segment_free(struct evbuffer_file_segment *seg) 2733 { 2734 int refcnt; 2735 EVLOCK_LOCK(seg->lock, 0); 2736 refcnt = --seg->refcnt; 2737 EVLOCK_UNLOCK(seg->lock, 0); 2738 if (refcnt > 0) 2739 return; 2740 EVUTIL_ASSERT(refcnt == 0); 2741 2742 if (seg->type == EVBUF_FS_SENDFILE) { 2743 ; 2744 } else if (seg->type == EVBUF_FS_MMAP) { 2745 #ifdef _WIN32 2746 CloseHandle(seg->mapping_handle); 2747 #elif defined (_EVENT_HAVE_MMAP) 2748 if (munmap(seg->mapping, seg->length) == -1) 2749 event_warn("%s: munmap failed", __func__); 2750 #endif 2751 } else { 2752 EVUTIL_ASSERT(seg->type == EVBUF_FS_IO); 2753 mm_free(seg->contents); 2754 } 2755 2756 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { 2757 close(seg->fd); 2758 } 2759 2760 EVTHREAD_FREE_LOCK(seg->lock, 0); 2761 mm_free(seg); 2762 } 2763 2764 int 2765 evbuffer_add_file_segment(struct evbuffer *buf, 2766 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 2767 { 2768 struct evbuffer_chain *chain; 2769 struct evbuffer_chain_file_segment *extra; 2770 2771 EVLOCK_LOCK(seg->lock, 0); 2772 ++seg->refcnt; 2773 EVLOCK_UNLOCK(seg->lock, 0); 2774 2775 EVBUFFER_LOCK(buf); 2776 2777 if (buf->freeze_end) 2778 goto err; 2779 2780 if (length < 0) { 2781 if (offset > seg->length) 2782 goto err; 2783 length = seg->length - offset; 2784 } 2785 2786 /* Can we actually add this? 
*/ 2787 if (offset+length > seg->length) 2788 goto err; 2789 2790 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 2791 if (!chain) 2792 goto err; 2793 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 2794 2795 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 2796 if (seg->type == EVBUF_FS_SENDFILE) { 2797 chain->flags |= EVBUFFER_SENDFILE; 2798 chain->misalign = seg->offset + offset; 2799 chain->off = length; 2800 chain->buffer_len = chain->misalign + length; 2801 } else if (seg->type == EVBUF_FS_MMAP) { 2802 #ifdef _WIN32 2803 ev_uint64_t total_offset = seg->offset+offset; 2804 ev_uint64_t offset_rounded=0, offset_remaining=0; 2805 LPVOID data; 2806 if (total_offset) { 2807 SYSTEM_INFO si; 2808 memset(&si, 0, sizeof(si)); /* cargo cult */ 2809 GetSystemInfo(&si); 2810 offset_remaining = total_offset % si.dwAllocationGranularity; 2811 offset_rounded = total_offset - offset_remaining; 2812 } 2813 data = MapViewOfFile( 2814 seg->mapping_handle, 2815 FILE_MAP_READ, 2816 offset_rounded >> 32, 2817 offset_rounded & 0xfffffffful, 2818 length); 2819 if (data == NULL) { 2820 mm_free(chain); 2821 goto err; 2822 } 2823 chain->buffer = (unsigned char*) data; 2824 chain->buffer_len = length+offset_remaining; 2825 chain->misalign = offset_remaining; 2826 chain->off = length; 2827 #else 2828 chain->buffer = (unsigned char*)(seg->contents + offset); 2829 chain->buffer_len = length; 2830 chain->off = length; 2831 #endif 2832 } else { 2833 EVUTIL_ASSERT(seg->type == EVBUF_FS_IO); 2834 chain->buffer = (unsigned char*)(seg->contents + offset); 2835 chain->buffer_len = length; 2836 chain->off = length; 2837 } 2838 2839 extra->segment = seg; 2840 buf->n_add_for_cb += length; 2841 evbuffer_chain_insert(buf, chain); 2842 2843 evbuffer_invoke_callbacks(buf); 2844 2845 EVBUFFER_UNLOCK(buf); 2846 2847 return 0; 2848 err: 2849 EVBUFFER_UNLOCK(buf); 2850 evbuffer_file_segment_free(seg); 2851 return -1; 2852 } 2853 2854 int 2855 
evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 2856 { 2857 struct evbuffer_file_segment *seg; 2858 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 2859 int r; 2860 2861 seg = evbuffer_file_segment_new(fd, offset, length, flags); 2862 if (!seg) 2863 return -1; 2864 r = evbuffer_add_file_segment(buf, seg, 0, length); 2865 evbuffer_file_segment_free(seg); 2866 return r; 2867 } 2868 2869 void 2870 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 2871 { 2872 EVBUFFER_LOCK(buffer); 2873 2874 if (!TAILQ_EMPTY(&buffer->callbacks)) 2875 evbuffer_remove_all_callbacks(buffer); 2876 2877 if (cb) { 2878 struct evbuffer_cb_entry *ent = 2879 evbuffer_add_cb(buffer, NULL, cbarg); 2880 ent->cb.cb_obsolete = cb; 2881 ent->flags |= EVBUFFER_CB_OBSOLETE; 2882 } 2883 EVBUFFER_UNLOCK(buffer); 2884 } 2885 2886 struct evbuffer_cb_entry * 2887 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 2888 { 2889 struct evbuffer_cb_entry *e; 2890 if (! 
(e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 2891 return NULL; 2892 EVBUFFER_LOCK(buffer); 2893 e->cb.cb_func = cb; 2894 e->cbarg = cbarg; 2895 e->flags = EVBUFFER_CB_ENABLED; 2896 TAILQ_INSERT_HEAD(&buffer->callbacks, e, next); 2897 EVBUFFER_UNLOCK(buffer); 2898 return e; 2899 } 2900 2901 int 2902 evbuffer_remove_cb_entry(struct evbuffer *buffer, 2903 struct evbuffer_cb_entry *ent) 2904 { 2905 EVBUFFER_LOCK(buffer); 2906 TAILQ_REMOVE(&buffer->callbacks, ent, next); 2907 EVBUFFER_UNLOCK(buffer); 2908 mm_free(ent); 2909 return 0; 2910 } 2911 2912 int 2913 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 2914 { 2915 struct evbuffer_cb_entry *cbent; 2916 int result = -1; 2917 EVBUFFER_LOCK(buffer); 2918 TAILQ_FOREACH(cbent, &buffer->callbacks, next) { 2919 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 2920 result = evbuffer_remove_cb_entry(buffer, cbent); 2921 goto done; 2922 } 2923 } 2924 done: 2925 EVBUFFER_UNLOCK(buffer); 2926 return result; 2927 } 2928 2929 int 2930 evbuffer_cb_set_flags(struct evbuffer *buffer, 2931 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 2932 { 2933 /* the user isn't allowed to mess with these. */ 2934 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 2935 EVBUFFER_LOCK(buffer); 2936 cb->flags |= flags; 2937 EVBUFFER_UNLOCK(buffer); 2938 return 0; 2939 } 2940 2941 int 2942 evbuffer_cb_clear_flags(struct evbuffer *buffer, 2943 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 2944 { 2945 /* the user isn't allowed to mess with these. 
*/ 2946 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 2947 EVBUFFER_LOCK(buffer); 2948 cb->flags &= ~flags; 2949 EVBUFFER_UNLOCK(buffer); 2950 return 0; 2951 } 2952 2953 int 2954 evbuffer_freeze(struct evbuffer *buffer, int start) 2955 { 2956 EVBUFFER_LOCK(buffer); 2957 if (start) 2958 buffer->freeze_start = 1; 2959 else 2960 buffer->freeze_end = 1; 2961 EVBUFFER_UNLOCK(buffer); 2962 return 0; 2963 } 2964 2965 int 2966 evbuffer_unfreeze(struct evbuffer *buffer, int start) 2967 { 2968 EVBUFFER_LOCK(buffer); 2969 if (start) 2970 buffer->freeze_start = 0; 2971 else 2972 buffer->freeze_end = 0; 2973 EVBUFFER_UNLOCK(buffer); 2974 return 0; 2975 } 2976 2977 #if 0 2978 void 2979 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 2980 { 2981 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 2982 cb->size_before_suspend = evbuffer_get_length(buffer); 2983 cb->flags |= EVBUFFER_CB_SUSPENDED; 2984 } 2985 } 2986 2987 void 2988 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 2989 { 2990 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 2991 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 2992 size_t sz = cb->size_before_suspend; 2993 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 2994 EVBUFFER_CB_CALL_ON_UNSUSPEND); 2995 cb->size_before_suspend = 0; 2996 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 2997 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 2998 } 2999 } 3000 } 3001 #endif 3002 3003