#include "first.h"

#include "stat_cache.h"
#include "log.h"
#include "fdevent.h"
#include "http_etag.h"
#include "algo_splaytree.h"

#include <sys/types.h>
#include <sys/stat.h>

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#if defined(HAVE_SYS_XATTR_H)
# include <sys/xattr.h>
#elif defined(HAVE_ATTR_ATTRIBUTES_H)
# include <attr/attributes.h>
#endif

#ifdef HAVE_SYS_EXTATTR_H
# include <sys/extattr.h>
#endif

#ifndef HAVE_LSTAT
#define lstat stat
#ifndef S_ISLNK
#define S_ISLNK(mode) (0)
#endif
#endif

/*
 * stat-cache
 *
 * - a splay tree is used so that we benefit from its caching effect
 *   (recently accessed entries are rotated toward the root)
 */

enum {
  STAT_CACHE_ENGINE_SIMPLE  = 0  /*(default)*/
 ,STAT_CACHE_ENGINE_NONE    = 1
 ,STAT_CACHE_ENGINE_FAM     = 2  /* same as STAT_CACHE_ENGINE_INOTIFY */
 ,STAT_CACHE_ENGINE_INOTIFY = 2  /* same as STAT_CACHE_ENGINE_FAM */
 ,STAT_CACHE_ENGINE_KQUEUE  = 2  /* same as STAT_CACHE_ENGINE_FAM */
};

struct stat_cache_fam;  /* declaration */

typedef struct stat_cache {
    int stat_cache_engine;
    splay_tree *files; /* nodes of tree are (stat_cache_entry *) */
    struct stat_cache_fam *scf;
} stat_cache;

static stat_cache sc;


static void * stat_cache_sptree_find(splay_tree ** const sptree,
                                     const char * const name,
                                     uint32_t len)
{
    const int ndx = splaytree_djbhash(name, len);
    *sptree = splaytree_splay(*sptree, ndx);
    return (*sptree && (*sptree)->key == ndx) ? (*sptree)->data : NULL;
}

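/* Lookup pattern (illustrative sketch only; not part of the build):
 * tree keys are djb hashes of the path, so a hit on the key alone is not
 * sufficient.  Callers must compare the stored name before trusting the
 * returned node, to guard against hash collisions, e.g.
 *
 *   stat_cache_entry *sce = stat_cache_sptree_find(&sc.files, name, len);
 *   if (sce && buffer_is_equal_string(&sce->name, name, len)) {
 *       ... safe to use sce ...
 *   }
 *
 * (this is the pattern used throughout this file, e.g. in
 *  stat_cache_update_entry(), stat_cache_delete_entry(),
 *  stat_cache_invalidate_entry())
 */
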
#if defined(HAVE_SYS_INOTIFY_H) \
 || (defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE))
#ifndef HAVE_FAM_H
#define HAVE_FAM_H
#endif
#endif

#ifdef HAVE_FAM_H

/* monitor changes in directories using FAM
 *
 * This implementation employing FAM monitors directories as they are used,
 * and maintains a reference count for cache use within stat_cache.c.
 * A periodic job runs in lighttpd every 32 seconds, expiring entries unused
 * in last 64 seconds out of the cache and cancelling FAM monitoring.  Items
 * within the cache are checked against the filesystem upon use if the last
 * stat() was 16 or more seconds ago.
 *
 * This implementation does not monitor every directory in a tree, and
 * therefore the cache may get out of sync with the filesystem.  Delays in
 * receiving and processing events from FAM might also lead to stale cache
 * entries.
 *
 * For many websites, a large number of files are seldom, if ever, modified,
 * and a common practice with images is to create a new file with a new name
 * when a new version is needed, so that client browsers and CDNs cache the
 * content better.  Given this, most sites will see little difference in
 * performance between server.stat-cache-engine = "fam" and "simple" (default).
 * The default server.stat-cache-engine = "simple" calls stat() on a target
 * once per second, and reuses that information until the next second.  Where
 * changes must be immediately visible, server.stat-cache-engine = "disable"
 * should be used.
 *
 * When considering use of server.stat-cache-engine = "fam", there are a few
 * additional limitations for this cache implementation using FAM:
 * - symlinks to files located outside of the current directory do not result
 *   in changes to that file being monitored (unless that file is in a
 *   directory which is monitored as a result of a different request).
 *   symlinks can be chained and can be circular.  This implementation *does
 *   not* readlink() or realpath() to resolve the chains to find and monitor
 *   the ultimate target directory.  While symlinks to files located outside
 *   the current directory are not monitored, symlinks to directories *are*
 *   monitored, though chains of symlinks to directories do not result in
 *   monitoring of the directories containing intermediate symlinks to the
 *   target directory.
 * - a rename of a directory which is not currently being monitored will
 *   result in stale information in the cache if there is a subdirectory that
 *   is being monitored.
 * Even though lighttpd will not receive FAM events in the above cases,
 * lighttpd does re-validate the information in the cache upon use if the
 * cache entry has not been checked in 16 seconds, so that is the upper limit
 * for use of stale data.
 *
 * Use of server.stat-cache-engine = "fam" is discouraged for extremely
 * volatile directories such as temporary directories (e.g. /tmp and maybe
 * /var/tmp) due to the overhead of processing the additional noise generated
 * by the changes.  Relatedly, server.stat-cache-engine = "fam" is not
 * recommended on trees of untrusted files where a malicious user could
 * generate an excess of change events.
 *
 * Internal note: lighttpd walks the caches to prune trees in stat_cache when
 * an event is received for a directory (or symlink to a directory) which has
 * been deleted or renamed.  The splaytree data structure is suboptimal for
 * frequent changes in large directory trees where a large number of
 * different files have recently been accessed and are part of the
 * stat_cache.
 */

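/* Illustrative configuration sketch (lighttpd.conf syntax shown here only
 * for orientation; consult the lighttpd documentation for the authoritative
 * option values on a given build):
 *
 *   server.stat-cache-engine = "simple"   # default: re-stat() at most 1/sec
 *   #server.stat-cache-engine = "fam"     # inotify/kqueue/FAM, if available
 *   #server.stat-cache-engine = "disable" # always stat(); changes visible
 *                                         # immediately
 */
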
#if defined(HAVE_SYS_INOTIFY_H) \
 && !(defined(HAVE_SYS_EVENT_H) && defined(HAVE_KQUEUE))

#include <sys/inotify.h>
#ifndef IN_EXCL_UNLINK /*(not defined in some very old glibc headers)*/
#define IN_EXCL_UNLINK 0x04000000
#endif

/*(translate FAM API to inotify; this is specific to stat_cache.c use of FAM)*/
#define fam fd /*(translate struct stat_cache_fam scf->fam -> scf->fd)*/
typedef int FAMRequest; /*(fr)*/
#define FAMClose(fd) \
        close(*(fd))
#define FAMCancelMonitor(fd, wd) \
        inotify_rm_watch(*(fd), *(wd))
#define fam_watch_mask ( IN_ATTRIB | IN_CREATE | IN_DELETE | IN_DELETE_SELF \
                       | IN_MODIFY | IN_MOVE_SELF | IN_MOVED_FROM \
                       | IN_EXCL_UNLINK | IN_ONLYDIR )
/*(note: follows symlinks; not providing IN_DONT_FOLLOW)*/
#define FAMMonitorDirectory(fd, fn, wd, userData) \
        ((*(wd) = inotify_add_watch(*(fd), (fn), (fam_watch_mask))) < 0)
typedef enum FAMCodes { /*(copied from fam.h to define arbitrary enum values)*/
    FAMChanged=1,
    FAMDeleted=2,
    FAMCreated=5,
    FAMMoved=6,
} FAMCodes;

#elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
#undef HAVE_SYS_INOTIFY_H

#include <sys/event.h>
#include <sys/time.h>

/*(translate FAM API to kqueue; this is specific to stat_cache.c use of FAM)*/
#define fam fd /*(translate struct stat_cache_fam scf->fam -> scf->fd)*/
typedef int FAMRequest; /*(fr)*/
#define FAMClose(fd) \
        (-1 != (*(fd)) ? close(*(fd)) : 0)
static int FAMCancelMonitor (const int * const fd, int * const wd)
{
    if (-1 == *fd) return 0;
    if (-1 == *wd) return 0;
    struct timespec t0 = { 0, 0 };
    struct kevent kev;
    EV_SET(&kev, *wd, EVFILT_VNODE, EV_DELETE, 0, 0, 0);
    int rc = kevent(*fd, &kev, 1, NULL, 0, &t0);
    close(*wd);
    *wd = -1;
    return rc;
}
static int FAMMonitorDirectory (int * const fd, char * const fn, int * const wd, void * const userData)
{
    *wd = fdevent_open_dirname(fn, 1); /*(note: follows symlinks)*/
    if (-1 == *wd) return -1;
    struct timespec t0 = { 0, 0 };
    struct kevent kev;
    unsigned short kev_flags = EV_ADD | EV_ENABLE | EV_CLEAR;
    unsigned int kev_fflags = NOTE_ATTRIB | NOTE_EXTEND | NOTE_LINK | NOTE_WRITE
                            | NOTE_DELETE | NOTE_REVOKE | NOTE_RENAME;
    EV_SET(&kev, *wd, EVFILT_VNODE, kev_flags, kev_fflags, 0, userData);
    return kevent(*fd, &kev, 1, NULL, 0, &t0);
}
typedef enum FAMCodes { /*(copied from fam.h to define arbitrary enum values)*/
    FAMChanged=1,
    FAMDeleted=2,
    FAMCreated=5,
    FAMMoved=6,
} FAMCodes;

#else

#include <fam.h>

#ifdef HAVE_FAMNOEXISTS
#ifndef LIGHTTPD_STATIC
#ifdef HAVE_DLFCN_H
#include <dlfcn.h>
#endif
#endif
#endif

#endif

typedef struct fam_dir_entry {
    buffer name;
    int refcnt;
    FAMRequest req;
    unix_time64_t stat_ts;
    dev_t st_dev;
    ino_t st_ino;
    struct fam_dir_entry *fam_parent;
} fam_dir_entry;

typedef struct stat_cache_fam {
    splay_tree *dirs; /* indexed by path; node data is fam_dir_entry */
  #ifdef HAVE_SYS_INOTIFY_H
    splay_tree *wds;  /* indexed by inotify watch descriptor */
  #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
  #else
    FAMConnection fam;
  #endif
    log_error_st *errh;
    fdevents *ev;
    fdnode *fdn;
    int fd;
} stat_cache_fam;

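/* Note on the structures above: scf->dirs is keyed by the djb hash of the
 * directory path and holds one fam_dir_entry per monitored directory; with
 * inotify a second tree, scf->wds, indexes the same entries by watch
 * descriptor so that events can be mapped back to directories.  The
 * fam_dir_entry refcnt counts the stat_cache_entry objects (and symlinked
 * child directories via fam_parent) referencing a monitored directory;
 * monitoring is cancelled lazily by fam_dir_periodic_cleanup() once refcnt
 * drops to 0.
 */
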
__attribute_returns_nonnull__
static fam_dir_entry * fam_dir_entry_init(const char *name, size_t len)
{
    fam_dir_entry * const fam_dir = calloc(1, sizeof(*fam_dir));
    force_assert(NULL != fam_dir);

    buffer_copy_string_len(&fam_dir->name, name, len);
    fam_dir->refcnt = 0;
  #if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    fam_dir->req = -1;
  #endif

    return fam_dir;
}

static void fam_dir_entry_free(fam_dir_entry *fam_dir)
{
    if (!fam_dir) return;
    /*(fam_dir->fam_parent might be invalid pointer here; ignore)*/
    free(fam_dir->name.ptr);
  #if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    if (-1 != fam_dir->req)
        close(fam_dir->req);
  #endif
    free(fam_dir);
}

static void fam_dir_invalidate_node(fam_dir_entry *fam_dir)
{
    fam_dir->stat_ts = 0;
    if (fam_dir->fam_parent) {
        --fam_dir->fam_parent->refcnt;
        fam_dir->fam_parent = NULL;
    }
}

/*
 * walk through the splay_tree and collect contents of the dir tree.
 * remove tagged entries in a second loop
 */

static void fam_dir_tag_refcnt(splay_tree *t, int *keys, int *ndx)
{
    if (*ndx == 512) return; /*(must match num array entries in keys[])*/
    if (t->left)  fam_dir_tag_refcnt(t->left,  keys, ndx);
    if (t->right) fam_dir_tag_refcnt(t->right, keys, ndx);
    if (*ndx == 512) return; /*(must match num array entries in keys[])*/

    fam_dir_entry * const fam_dir = t->data;
    if (0 == fam_dir->refcnt) {
        fam_dir_invalidate_node(fam_dir);
        keys[(*ndx)++] = t->key;
    }
}

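/* fam_dir_periodic_cleanup() below removes monitoring for directories whose
 * refcnt has dropped to 0: it tags up to 512 such nodes per pass (bounded by
 * keys[]), deletes them from the tree(s), cancels the kernel watch, and
 * repeats while a full batch was collected.  With kqueue the EV_DELETE
 * requests are batched into one kevent() call and the directory fds are
 * closed afterwards.
 */
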
__attribute_noinline__
static void fam_dir_periodic_cleanup (void) {
    stat_cache_fam * const scf = sc.scf;
    int max_ndx, i;
    int keys[512]; /* 2k size on stack */
  #if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    struct kevent kevl[512]; /* 32k size on stack to batch kevent EV_DELETE */
  #endif
    do {
        if (!scf->dirs) break;
        max_ndx = 0;
        fam_dir_tag_refcnt(scf->dirs, keys, &max_ndx);
        for (i = 0; i < max_ndx; ++i) {
            const int ndx = keys[i];
            splay_tree *node = scf->dirs = splaytree_splay(scf->dirs, ndx);
            if (node && node->key == ndx) {
                fam_dir_entry *fam_dir = node->data;
                scf->dirs = splaytree_delete(scf->dirs, ndx);
              #ifdef HAVE_SYS_INOTIFY_H
                scf->wds = splaytree_delete(scf->wds, fam_dir->req);
              #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
                /* batch process kevent removal; defer cancel */
                EV_SET(kevl+i, fam_dir->req, EVFILT_VNODE, EV_DELETE, 0, 0, 0);
                fam_dir->req = -1; /*(make FAMCancelMonitor() a no-op)*/
              #endif
                FAMCancelMonitor(&scf->fam, &fam_dir->req);
                fam_dir_entry_free(fam_dir);
            }
        }
      #if defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
        /* batch process: kevent() to submit EV_DELETE, then close dir fds */
        if (0 == max_ndx) break;
        struct timespec t0 = { 0, 0 };
        kevent(scf->fd, kevl, max_ndx, NULL, 0, &t0);
        for (i = 0; i < max_ndx; ++i)
            close((int)kevl[i].ident);
      #endif
    } while (max_ndx == sizeof(keys)/sizeof(int));
}

static void fam_dir_invalidate_tree(splay_tree *t, const char *name, size_t len)
{
  #ifdef __clang_analyzer__
    force_assert(name);
  #endif
    /*force_assert(t);*/
    if (t->left)  fam_dir_invalidate_tree(t->left,  name, len);
    if (t->right) fam_dir_invalidate_tree(t->right, name, len);

    fam_dir_entry * const fam_dir = t->data;
  #ifdef __clang_analyzer__
    force_assert(fam_dir);
  #endif
    const buffer * const b = &fam_dir->name;
    size_t blen = buffer_clen(b);
    if (blen > len && b->ptr[len] == '/' && 0 == memcmp(b->ptr, name, len))
        fam_dir_invalidate_node(fam_dir);
}

/* declarations */
static void stat_cache_delete_tree(const char *name, uint32_t len);
static void stat_cache_invalidate_dir_tree(const char *name, size_t len);
static void stat_cache_handle_fdevent_fn(stat_cache_fam * const scf, fam_dir_entry * const fam_dir, const char * const fn, const uint32_t fnlen, int code);

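/* stat_cache_handle_fdevent_in() below drains pending filesystem events
 * without blocking, one strategy per backend:
 * - inotify: read() into a 4k buffer and walk the variable-length
 *   inotify_event records; re-read only if the previous read() was large
 *   enough that further events might not have fit in the buffer.
 * - kqueue: fetch up to 256 kevents per kevent() call with a zero timeout;
 *   repeat while a full batch was returned.
 * - FAM: loop over FAMPending()/FAMNextEvent().
 * In each case the event is mapped back to a fam_dir_entry and translated
 * into one of the FAMCodes handled by stat_cache_handle_fdevent_fn().
 */
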
static void stat_cache_handle_fdevent_in(stat_cache_fam *scf)
{
  #ifdef HAVE_SYS_INOTIFY_H
    /*(inotify pads in->len to align struct following in->name[])*/
    char buf[4096]
      __attribute__ ((__aligned__(__alignof__(struct inotify_event))));
    int rd;
    do {
        rd = (int)read(scf->fd, buf, sizeof(buf));
        if (rd <= 0) {
            if (-1 == rd && errno != EINTR && errno != EAGAIN) {
                log_perror(scf->errh, __FILE__, __LINE__, "inotify error");
                /* TODO: could flush cache, close scf->fd, and re-open inotify*/
            }
            break;
        }
        for (int i = 0; i < rd; ) {
            struct inotify_event * const in =
              (struct inotify_event *)((uintptr_t)buf + i);
            uint32_t len = in->len;
            if (len > sizeof(buf)) break; /*(should not happen)*/
            i += sizeof(struct inotify_event) + len;
            if (i > rd) break; /*(should not happen (partial record))*/
            if (in->mask & IN_CREATE)
                continue; /*(see comment below for FAMCreated)*/
            if (in->mask & IN_Q_OVERFLOW) {
                log_error(scf->errh, __FILE__, __LINE__,
                          "inotify queue overflow");
                continue;
            }
            /* ignore events which may have been pending for
             * paths recently cancelled via FAMCancelMonitor() */
            scf->wds = splaytree_splay(scf->wds, in->wd);
            if (!scf->wds || scf->wds->key != in->wd)
                continue;
            fam_dir_entry *fam_dir = scf->wds->data;
            if (NULL == fam_dir) /*(should not happen)*/
                continue;
            if (fam_dir->req != in->wd) /*(should not happen)*/
                continue;
            /*(specific to use here in stat_cache.c)*/
            int code = 0;
            if (in->mask & (IN_ATTRIB | IN_MODIFY))
                code = FAMChanged;
            else if (in->mask & (IN_DELETE | IN_DELETE_SELF | IN_UNMOUNT))
                code = FAMDeleted;
            else if (in->mask & (IN_MOVE_SELF | IN_MOVED_FROM))
                code = FAMMoved;

            if (len) {
                do { --len; } while (len && in->name[len-1] == '\0');
            }
            stat_cache_handle_fdevent_fn(scf, fam_dir, in->name, len, code);
        }
    } while (rd + sizeof(struct inotify_event) + NAME_MAX + 1 > sizeof(buf));
  #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    struct kevent kevl[256];
    struct timespec t0 = { 0, 0 };
    int n;
    do {
        n = kevent(scf->fd, NULL, 0, kevl, sizeof(kevl)/sizeof(*kevl), &t0);
        if (n <= 0) break;
        for (int i = 0; i < n; ++i) {
            const struct kevent * const kev = kevl+i;
            /* ignore events which may have been pending for
             * paths recently cancelled via FAMCancelMonitor() */
            int ndx = (int)(intptr_t)kev->udata;
            scf->dirs = splaytree_splay(scf->dirs, ndx);
            if (!scf->dirs || scf->dirs->key != ndx)
                continue;
            fam_dir_entry *fam_dir = scf->dirs->data;
            if (fam_dir->req != (int)kev->ident)
                continue;
            /*(specific to use here in stat_cache.c)*/
            /* note: stat_cache only monitors on directories,
             *       so events here are only on directories
             * note: changes are treated as FAMDeleted since
             *       it is unknown which file in dir was changed
             *       This is not efficient, but this stat_cache mechanism also
             *       should not be used on frequently modified directories. */
            int code = 0;
            if (kev->fflags & (NOTE_WRITE|NOTE_ATTRIB|NOTE_EXTEND|NOTE_LINK))
                code = FAMDeleted; /*(not FAMChanged; see comment above)*/
            else if (kev->fflags & (NOTE_DELETE|NOTE_REVOKE))
                code = FAMDeleted;
            else if (kev->fflags & NOTE_RENAME)
                code = FAMMoved;
            if (kev->flags & EV_ERROR) /*(not expected; treat as FAMDeleted)*/
                code = FAMDeleted;
            stat_cache_handle_fdevent_fn(scf, fam_dir, NULL, 0, code);
        }
    } while (n == sizeof(kevl)/sizeof(*kevl));
  #else
    for (int i = 0, ndx; i || (i = FAMPending(&scf->fam)) > 0; --i) {
        FAMEvent fe;
        if (FAMNextEvent(&scf->fam, &fe) < 0) break;

        /* ignore events which may have been pending for
         * paths recently cancelled via FAMCancelMonitor() */
        ndx = (int)(intptr_t)fe.userdata;
        scf->dirs = splaytree_splay(scf->dirs, ndx);
        if (!scf->dirs || scf->dirs->key != ndx) {
            continue;
        }
        fam_dir_entry *fam_dir = scf->dirs->data;
        if (FAMREQUEST_GETREQNUM(&fam_dir->req)
            != FAMREQUEST_GETREQNUM(&fe.fr)) {
            continue;
        }

        uint32_t fnlen = (fe.code != FAMCreated && fe.filename[0] != '/')
          ? (uint32_t)strlen(fe.filename)
          : 0;
        stat_cache_handle_fdevent_fn(scf, fam_dir, fe.filename, fnlen, fe.code);
    }
  #endif
}

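/* stat_cache_handle_fdevent_fn() translates a (directory, filename, FAMCode)
 * triple into cache maintenance: FAMChanged invalidates the affected entry,
 * while FAMDeleted/FAMMoved delete the cached subtree, invalidate the
 * fam_dir_entry (and any monitored descendants), and trigger cleanup.
 * FAMCreated is ignored because creating a file also modifies the directory,
 * which produces its own event.
 */
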
static void stat_cache_handle_fdevent_fn(stat_cache_fam * const scf, fam_dir_entry *fam_dir, const char * const fn, const uint32_t fnlen, int code)
{
    if (fnlen) {
        buffer * const n = &fam_dir->name;
        fam_dir_entry *fam_link;
        uint32_t len;
        switch (code) {
        case FAMCreated:
            /* file created in monitored dir modifies dir and
             * we should get a separate FAMChanged event for dir.
             * Therefore, ignore file FAMCreated event here.
             * Also, if FAMNoExists() is used, might get spurious
             * FAMCreated events as changes are made e.g. in monitored
             * sub-sub-sub dirs and the library discovers new (already
             * existing) dir entries */
            return;
        case FAMChanged:
            /* file changed in monitored dir does not modify dir */
        case FAMDeleted:
        case FAMMoved:
            /* file deleted or moved in monitored dir modifies dir,
             * but FAM provides separate notification for that */

            /* temporarily append filename to dir in fam_dir->name to
             * construct path, then delete stat_cache entry (if any)*/
            len = buffer_clen(n);
            buffer_append_path_len(n, fn, fnlen);
            /* (alternatively, could choose to stat() and update)*/
            stat_cache_invalidate_entry(BUF_PTR_LEN(n));

            fam_link = /*(check if might be symlink to monitored dir)*/
              stat_cache_sptree_find(&scf->dirs, BUF_PTR_LEN(n));
            if (fam_link && !buffer_is_equal(&fam_link->name, n))
                fam_link = NULL;

            buffer_truncate(n, len);

            if (fam_link) {
                /* replaced symlink changes containing dir */
                stat_cache_invalidate_entry(n->ptr, len);
                /* handle symlink to dir as deleted dir below */
                code = FAMDeleted;
                fam_dir = fam_link;
                break;
            }
            return;
        default:
            return;
        }
    }

    switch(code) {
    case FAMChanged:
        stat_cache_invalidate_entry(BUF_PTR_LEN(&fam_dir->name));
        break;
    case FAMDeleted:
    case FAMMoved:
        stat_cache_delete_tree(BUF_PTR_LEN(&fam_dir->name));
        fam_dir_invalidate_node(fam_dir);
        if (scf->dirs)
            fam_dir_invalidate_tree(scf->dirs,
                                    BUF_PTR_LEN(&fam_dir->name));
        fam_dir_periodic_cleanup();
        break;
    default:
        break;
    }
}

static handler_t stat_cache_handle_fdevent(void *ctx, int revent)
{
    stat_cache_fam * const scf = ctx; /* sc.scf */

    if (revent & FDEVENT_IN) {
        stat_cache_handle_fdevent_in(scf);
    }

    if (revent & (FDEVENT_HUP|FDEVENT_RDHUP)) {
        /* fam closed the connection */
        log_error(scf->errh, __FILE__, __LINE__,
                  "FAM connection closed; disabling stat_cache.");
        /* (although effectively STAT_CACHE_ENGINE_NONE,
         *  do not change here so that periodic jobs clean up memory)*/
        /*sc.stat_cache_engine = STAT_CACHE_ENGINE_NONE; */
        fdevent_fdnode_event_del(scf->ev, scf->fdn);
        fdevent_unregister(scf->ev, scf->fd);
        scf->fdn = NULL;

        FAMClose(&scf->fam);
        scf->fd = -1;
    }

    return HANDLER_GO_ON;
}

static stat_cache_fam * stat_cache_init_fam(fdevents *ev, log_error_st *errh) {
    stat_cache_fam *scf = calloc(1, sizeof(*scf));
    force_assert(scf);
    scf->fd = -1;
    scf->ev = ev;
    scf->errh = errh;

  #ifdef HAVE_SYS_INOTIFY_H
    scf->fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
    if (scf->fd < 0) {
        log_perror(errh, __FILE__, __LINE__, "inotify_init1()");
        free(scf);
        return NULL;
    }
  #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
   #ifdef __NetBSD__
    scf->fd = kqueue1(O_NONBLOCK|O_CLOEXEC|O_NOSIGPIPE);
   #else
    scf->fd = kqueue();
    if (scf->fd >= 0) fdevent_setfd_cloexec(scf->fd);
   #endif
    if (scf->fd < 0) {
        log_perror(errh, __FILE__, __LINE__, "kqueue()");
        free(scf);
        return NULL;
    }
  #else
    /* setup FAM */
    if (0 != FAMOpen2(&scf->fam, "lighttpd")) {
        log_error(errh, __FILE__, __LINE__,
                  "could not open a fam connection, dying.");
        free(scf);
        return NULL;
    }
   #ifdef HAVE_FAMNOEXISTS
    #ifdef LIGHTTPD_STATIC
    FAMNoExists(&scf->fam);
    #else
    int (*FAMNoExists_fn)(FAMConnection *);
    FAMNoExists_fn =
      (int (*)(FAMConnection *))(intptr_t)dlsym(RTLD_DEFAULT,"FAMNoExists");
    if (FAMNoExists_fn) FAMNoExists_fn(&scf->fam);
    #endif
   #endif

    scf->fd = FAMCONNECTION_GETFD(&scf->fam);
    fdevent_setfd_cloexec(scf->fd);
  #endif
    scf->fdn = fdevent_register(scf->ev, scf->fd, stat_cache_handle_fdevent, scf);
    fdevent_fdnode_event_set(scf->ev, scf->fdn, FDEVENT_IN | FDEVENT_RDHUP);

    return scf;
}

static void stat_cache_free_fam(stat_cache_fam *scf) {
    if (NULL == scf) return;

  #ifdef HAVE_SYS_INOTIFY_H
    while (scf->wds) {
        splay_tree *node = scf->wds;
        scf->wds = splaytree_delete(scf->wds, node->key);
    }
  #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    /*(quicker cleanup to close kqueue() before cancel per entry)*/
    close(scf->fd);
    scf->fd = -1;
  #endif
    while (scf->dirs) {
        /*(skip entry invalidation and FAMCancelMonitor())*/
        splay_tree *node = scf->dirs;
        fam_dir_entry_free((fam_dir_entry *)node->data);
        scf->dirs = splaytree_delete(scf->dirs, node->key);
    }

    if (-1 != scf->fd) {
        /*scf->fdn already cleaned up in fdevent_free()*/
        FAMClose(&scf->fam);
        /*scf->fd = -1;*/
    }

    free(scf);
}

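/* fam_dir_monitor() registers (or re-validates) monitoring for the directory
 * containing fn: it normalizes the path down to the directory, hashes it into
 * scf->dirs (on a hash collision the existing entry is kept and the new path
 * is simply not monitored, to avoid cache thrashing), re-stats the directory
 * roughly every 16 seconds to detect device/inode changes not reported by
 * events, and, if the directory itself is reached through a symlink, also
 * monitors the parent directory containing that symlink via
 * fam_dir->fam_parent.  Returns the fam_dir_entry with its refcnt
 * incremented, or NULL on failure.
 */
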
static fam_dir_entry * fam_dir_monitor(stat_cache_fam *scf, char *fn, uint32_t dirlen, struct stat *st)
{
    if (NULL == scf->fdn) return NULL; /* FAM connection closed; do nothing */
    const int fn_is_dir = S_ISDIR(st->st_mode);
    /*force_assert(0 != dirlen);*/
    /*force_assert(fn[0] == '/');*/
    /* consistency: ensure fn does not end in '/' unless root "/"
     * FAM events will not end in '/', so easier to match this way */
    if (fn[dirlen-1] == '/') --dirlen;
    if (0 == dirlen) dirlen = 1; /* root dir ("/") */
    /* Note: paths are expected to be normalized before calling stat_cache,
     * e.g. without repeated '/' */
    if (!fn_is_dir) {
        while (fn[--dirlen] != '/') ;
        if (0 == dirlen) dirlen = 1; /*(should not happen for file)*/
    }
    int dir_ndx = splaytree_djbhash(fn, dirlen);
    fam_dir_entry *fam_dir = NULL;

    scf->dirs = splaytree_splay(scf->dirs, dir_ndx);
    if (NULL != scf->dirs && scf->dirs->key == dir_ndx) {
        fam_dir = scf->dirs->data;
        if (!buffer_eq_slen(&fam_dir->name, fn, dirlen)) {
            /* hash collision; preserve existing
             * do not monitor new to avoid cache thrashing */
            return NULL;
        }
        /* directory already registered */
    }

    const unix_time64_t cur_ts = log_monotonic_secs;
    struct stat lst;
    int ck_dir = fn_is_dir;
    if (!fn_is_dir && (NULL==fam_dir || cur_ts - fam_dir->stat_ts >= 16)) {
        ck_dir = 1;
        /*(temporarily modify fn)*/
        fn[dirlen] = '\0';
        if (0 != lstat(fn, &lst)) {
            fn[dirlen] = '/';
            return NULL;
        }
        if (!S_ISLNK(lst.st_mode)) {
            st = &lst;
        }
        else if (0 != stat(fn, st)) { /*st passed in now is stat() of dir*/
            fn[dirlen] = '/';
            return NULL;
        }
        fn[dirlen] = '/';
    }

    int ck_lnk = (NULL == fam_dir);
    if (ck_dir && NULL != fam_dir) {
        /* check that stat() matches device and inode, in case an external
         * event that is not being monitored occurred
         * (e.g. rename of an unmonitored parent dir)*/
        if (st->st_dev != fam_dir->st_dev || st->st_ino != fam_dir->st_ino) {
            ck_lnk = 1;
            /*(modifies scf->dirs but no need to re-splay for dir_ndx since
             * fam_dir is not NULL and so splaytree_insert not called below)*/
            if (scf->dirs) fam_dir_invalidate_tree(scf->dirs, fn, dirlen);
            if (!fn_is_dir) /*(if dir, caller is updating stat_cache_entry)*/
                stat_cache_update_entry(fn, dirlen, st, NULL);
            /*(must not delete tree since caller is holding a valid node)*/
            stat_cache_invalidate_dir_tree(fn, dirlen);
          #ifdef HAVE_SYS_INOTIFY_H
            scf->wds = splaytree_delete(scf->wds, fam_dir->req);
          #endif
            if (0 != FAMCancelMonitor(&scf->fam, &fam_dir->req)
                || 0 != FAMMonitorDirectory(&scf->fam, fam_dir->name.ptr,
                                            &fam_dir->req,
                                            (void *)(intptr_t)dir_ndx)) {
                fam_dir->stat_ts = 0; /* invalidate */
                return NULL;
            }
            fam_dir->st_dev = st->st_dev;
            fam_dir->st_ino = st->st_ino;
          #ifdef HAVE_SYS_INOTIFY_H
            scf->wds = splaytree_insert(scf->wds, fam_dir->req, fam_dir);
          #endif
        }
        fam_dir->stat_ts = cur_ts;
    }

    if (NULL == fam_dir) {
        fam_dir = fam_dir_entry_init(fn, dirlen);

        if (0 != FAMMonitorDirectory(&scf->fam,fam_dir->name.ptr,&fam_dir->req,
                                     (void *)(intptr_t)dir_ndx)) {
          #if defined(HAVE_SYS_INOTIFY_H) \
           || (defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE)
            log_perror(scf->errh, __FILE__, __LINE__,
                       "monitoring dir failed: %s file: %s",
                       fam_dir->name.ptr, fn);
          #else
            log_error(scf->errh, __FILE__, __LINE__,
                      "monitoring dir failed: %s file: %s %s",
                      fam_dir->name.ptr, fn, FamErrlist[FAMErrno]);
          #endif
            fam_dir_entry_free(fam_dir);
            return NULL;
        }

        scf->dirs = splaytree_insert(scf->dirs, dir_ndx, fam_dir);
      #ifdef HAVE_SYS_INOTIFY_H
        scf->wds = splaytree_insert(scf->wds, fam_dir->req, fam_dir);
      #endif
        fam_dir->stat_ts = cur_ts;
        fam_dir->st_dev  = st->st_dev;
        fam_dir->st_ino  = st->st_ino;
    }

    if (ck_lnk) {
        if (fn_is_dir) {
            /*(temporarily modify fn)*/
            char e = fn[dirlen];
            fn[dirlen] = '\0';
            if (0 != lstat(fn, &lst)) {
                fn[dirlen] = e;
                return NULL;
            }
            fn[dirlen] = e;
        }
        if (fam_dir->fam_parent) {
            --fam_dir->fam_parent->refcnt;
            fam_dir->fam_parent = NULL;
        }
        if (S_ISLNK(lst.st_mode)) {
            fam_dir->fam_parent = fam_dir_monitor(scf, fn, dirlen, &lst);
        }
    }

    ++fam_dir->refcnt;
    return fam_dir;
}

#endif


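/* stat_cache_entry reference counting: the cache itself holds one reference
 * (set in stat_cache_entry_init()); callers elsewhere in the server that
 * retain an entry (for example while its open fd is in use) add and drop
 * references via stat_cache_entry_refchg().  stat_cache_entry_free() only
 * releases the memory once the last reference is dropped.
 */
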
__attribute_malloc__
__attribute_returns_nonnull__
static stat_cache_entry * stat_cache_entry_init(void) {
    stat_cache_entry *sce = calloc(1, sizeof(*sce));
    force_assert(NULL != sce);
    sce->fd = -1;
    sce->refcnt = 1;
    return sce;
}

static void stat_cache_entry_free(void *data) {
    stat_cache_entry *sce = data;
    if (!sce) return;

    if (--sce->refcnt) return;

  #ifdef HAVE_FAM_H
    /*(decrement refcnt only;
     * defer cancelling FAM monitor on dir even if refcnt reaches zero)*/
    if (sce->fam_dir) --((fam_dir_entry *)sce->fam_dir)->refcnt;
  #endif

    free(sce->name.ptr);
    free(sce->etag.ptr);
    if (sce->content_type.size) free(sce->content_type.ptr);
    if (sce->fd >= 0) close(sce->fd);

    free(sce);
}

void stat_cache_entry_refchg(void *data, int mod) {
    /*(expect mod == -1 or mod == 1)*/
    stat_cache_entry * const sce = data;
    if (mod < 0 && 1 == sce->refcnt)
        stat_cache_entry_free(data);
    else
        sce->refcnt += mod;
}

#if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)

static const char *attrname = "Content-Type";
static char attrval[128];
static buffer attrb = { attrval, 0, 0 };

static int stat_cache_attr_get(const char *name) {
  #if defined(HAVE_XATTR)
   #if defined(HAVE_SYS_XATTR_H)
    ssize_t attrlen;
    if (0 < (attrlen = getxattr(name, attrname,
                                attrval, sizeof(attrval)-1)))
   #else
    int attrlen = sizeof(attrval)-1;
    if (0 == attr_get(name, attrname, attrval, &attrlen, 0))
   #endif
  #elif defined(HAVE_EXTATTR)
    ssize_t attrlen;
    if (0 < (attrlen = extattr_get_file(name, EXTATTR_NAMESPACE_USER, attrname,
                                        attrval, sizeof(attrval)-1)))
  #endif
    {
        attrval[attrlen] = '\0';
        attrb.used = (uint32_t)(attrlen + 1);
        return 1;
    }
    return 0;
}

#endif

int stat_cache_init(fdevents *ev, log_error_st *errh) {
  #ifdef HAVE_FAM_H
    if (sc.stat_cache_engine == STAT_CACHE_ENGINE_FAM) {
        sc.scf = stat_cache_init_fam(ev, errh);
        if (NULL == sc.scf) return 0;
    }
  #else
    UNUSED(ev);
    UNUSED(errh);
  #endif

    return 1;
}

void stat_cache_free(void) {
    splay_tree *sptree = sc.files;
    while (sptree) {
        stat_cache_entry_free(sptree->data);
        sptree = splaytree_delete(sptree, sptree->key);
    }
    sc.files = NULL;

  #ifdef HAVE_FAM_H
    stat_cache_free_fam(sc.scf);
    sc.scf = NULL;
  #endif

  #if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)
    attrname = "Content-Type";
  #endif

    sc.stat_cache_engine = STAT_CACHE_ENGINE_SIMPLE; /*(default)*/
}

void stat_cache_xattrname (const char *name) {
  #if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)
    attrname = name;
  #else
    UNUSED(name);
  #endif
}

int stat_cache_choose_engine (const buffer *stat_cache_string, log_error_st *errh) {
    if (buffer_is_blank(stat_cache_string))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_SIMPLE;
    else if (buffer_eq_slen(stat_cache_string, CONST_STR_LEN("simple")))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_SIMPLE;
  #ifdef HAVE_SYS_INOTIFY_H
    else if (buffer_eq_slen(stat_cache_string, CONST_STR_LEN("inotify")))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_INOTIFY;
        /*(STAT_CACHE_ENGINE_FAM == STAT_CACHE_ENGINE_INOTIFY)*/
  #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
    else if (buffer_eq_slen(stat_cache_string, CONST_STR_LEN("kqueue")))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_KQUEUE;
        /*(STAT_CACHE_ENGINE_FAM == STAT_CACHE_ENGINE_KQUEUE)*/
  #endif
  #ifdef HAVE_FAM_H
    else if (buffer_eq_slen(stat_cache_string, CONST_STR_LEN("fam")))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_FAM;
  #endif
    else if (buffer_eq_slen(stat_cache_string, CONST_STR_LEN("disable"))
             || buffer_eq_slen(stat_cache_string, CONST_STR_LEN("none")))
        sc.stat_cache_engine = STAT_CACHE_ENGINE_NONE;
    else {
        log_error(errh, __FILE__, __LINE__,
          "server.stat-cache-engine can be one of \"disable\", \"simple\","
         #ifdef HAVE_SYS_INOTIFY_H
          " \"inotify\","
         #elif defined HAVE_SYS_EVENT_H && defined HAVE_KQUEUE
          " \"kqueue\","
         #endif
         #ifdef HAVE_FAM_H
          " \"fam\","
         #endif
          " but not: %s", stat_cache_string->ptr);
        return -1;
    }
    return 0;
}

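/* Lookup order in stat_cache_mimetype_by_ext() below, shown on a
 * hypothetical entry "/www/dir/index.min.html" (names invented purely for
 * illustration): with fewer than 16 configured mimetypes each key is tried
 * as a case-insensitive suffix of the path; with 16 or more, exact hash
 * lookups are tried for the basename "index.min.html", then ".min.html",
 * "min.html", ".html", "html", and finally the catch-all "" entry.
 */
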
const buffer * stat_cache_mimetype_by_ext(const array * const mimetypes, const char * const name, const uint32_t nlen)
{
    const char * const end = name + nlen; /*(end of string)*/
    const uint32_t used = mimetypes->used;
    if (used < 16) {
        for (uint32_t i = 0; i < used; ++i) {
            /* suffix match */
            const data_string *ds = (data_string *)mimetypes->data[i];
            const size_t klen = buffer_clen(&ds->key);
            if (klen <= nlen && buffer_eq_icase_ssn(end-klen, ds->key.ptr, klen))
                return &ds->value;
        }
    }
    else {
        const char *s;
        const data_string *ds;
        if (nlen) {
            for (s = end-1; s != name && *s != '/'; --s) ; /*(like memrchr())*/
            if (*s == '/') ++s;
        }
        else {
            s = name;
        }
        /* search for basename, then longest .ext2.ext1, then .ext1, then "" */
        ds = (const data_string *)array_get_element_klen(mimetypes, s, end - s);
        if (NULL != ds) return &ds->value;
        while (++s < end) {
            while (*s != '.' && ++s != end) ;
            if (s == end) break;
            /* search ".ext" then "ext" */
            ds = (const data_string *)array_get_element_klen(mimetypes, s, end - s);
            if (NULL != ds) return &ds->value;
            /* repeat search without leading '.' to handle situation where
             * admin configured mimetype.assign keys without leading '.' */
            if (++s < end) {
                if (*s == '.') { --s; continue; }
                ds = (const data_string *)array_get_element_klen(mimetypes, s, end - s);
                if (NULL != ds) return &ds->value;
            }
        }
        /* search for ""; catchall */
        ds = (const data_string *)array_get_element_klen(mimetypes, CONST_STR_LEN(""));
        if (NULL != ds) return &ds->value;
    }

    return NULL;
}

#if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)

const buffer * stat_cache_mimetype_by_xattr(const char * const name)
{
    return stat_cache_attr_get(name) ? &attrb : NULL;
}

const buffer * stat_cache_content_type_get_by_xattr(stat_cache_entry *sce, const array *mimetypes, int use_xattr)
{
    /*(invalid caching if user config has multiple, different
     * r->conf.mimetypes for same extension (not expected))*/
    if (!buffer_is_blank(&sce->content_type)) return &sce->content_type;

    if (!S_ISREG(sce->st.st_mode)) return NULL;

    /* cache mimetype */
    const buffer *mtype =
      (use_xattr) ? stat_cache_mimetype_by_xattr(sce->name.ptr) : NULL;
    if (NULL == mtype)
        mtype = stat_cache_mimetype_by_ext(mimetypes, BUF_PTR_LEN(&sce->name));
    if (NULL != mtype) {
        if (sce->content_type.size) {
            buffer_copy_buffer(&sce->content_type, mtype);
        }
        else if (mtype == &attrb) {
            sce->content_type.ptr = NULL;
            buffer_copy_buffer(&sce->content_type, mtype);
        }
        else {
            /*(copy pointers from mimetypes array; avoid allocation)*/
            sce->content_type.ptr = mtype->ptr;
            sce->content_type.used = mtype->used;
            /*(leave sce->content_type.size = 0 to flag not-allocated)*/
        }
    }
    else
        buffer_clear(&sce->content_type);

    return &sce->content_type;
}

#else

const buffer * stat_cache_content_type_get_by_ext(stat_cache_entry *sce, const array *mimetypes)
{
    /*(invalid caching if user config has multiple, different
     * r->conf.mimetypes for same extension (not expected))*/
    if (!buffer_is_blank(&sce->content_type)) return &sce->content_type;

    if (!S_ISREG(sce->st.st_mode)) return NULL;

    /* cache mimetype */
    const buffer * const mtype =
      stat_cache_mimetype_by_ext(mimetypes, BUF_PTR_LEN(&sce->name));
    if (NULL != mtype) {
        /*(copy pointers from mimetypes array; avoid allocation)*/
        sce->content_type.ptr = mtype->ptr;
        sce->content_type.used = mtype->used;
        /*(leave sce->content_type.size = 0 to flag not-allocated)*/
    }
    else
        buffer_clear(&sce->content_type);

    return &sce->content_type;
}

#endif

const buffer * stat_cache_etag_get(stat_cache_entry *sce, int flags) {
    /*(invalid caching if user cfg has multiple, different r->conf.etag_flags
     * for same path (not expected, since etag flags should be by filesystem))*/
    if (!buffer_is_blank(&sce->etag)) return &sce->etag;

    if (S_ISREG(sce->st.st_mode) || S_ISDIR(sce->st.st_mode)) {
        if (0 == flags) return NULL;
        http_etag_create(&sce->etag, &sce->st, flags);
        return &sce->etag;
    }

    return NULL;
}

__attribute_pure__
static int stat_cache_stat_eq(const struct stat * const sta, const struct stat * const stb) {
    return
      #ifdef st_mtime /* use high-precision timestamp if available */
       #if defined(__APPLE__) && defined(__MACH__)
        sta->st_mtimespec.tv_nsec == stb->st_mtimespec.tv_nsec
       #else
        sta->st_mtim.tv_nsec == stb->st_mtim.tv_nsec
       #endif
      #else
        1
      #endif
        && sta->st_mtime == stb->st_mtime
        && sta->st_size  == stb->st_size
        && sta->st_ino   == stb->st_ino
        && sta->st_dev   == stb->st_dev;
}

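/* stat_cache_update_entry() refreshes an existing cache node in place after
 * a caller has already stat()ed (and possibly generated an etag for) the
 * path.  If the file changed while its cached fd is still referenced by
 * other users (refcnt > 1), the old entry is detached for those users and a
 * fresh entry replaces it in the tree, so in-flight responses keep the fd
 * they opened.
 */
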
void stat_cache_update_entry(const char *name, uint32_t len,
                             const struct stat *st, const buffer *etagb)
{
    if (sc.stat_cache_engine == STAT_CACHE_ENGINE_NONE) return;
    force_assert(0 != len);
    if (name[len-1] == '/') { if (0 == --len) len = 1; }
    splay_tree **sptree = &sc.files;
    stat_cache_entry *sce =
      stat_cache_sptree_find(sptree, name, len);
    if (sce && buffer_is_equal_string(&sce->name, name, len)) {
        if (!stat_cache_stat_eq(&sce->st, st)) {
            /* etagb might be NULL to clear etag (invalidate) */
            buffer_clear(&sce->etag);
            if (etagb)
                buffer_copy_string_len(&sce->etag, BUF_PTR_LEN(etagb));
          #if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)
            buffer_clear(&sce->content_type);
          #endif
            if (sce->fd >= 0) {
                if (1 == sce->refcnt) {
                    close(sce->fd);
                    sce->fd = -1;
                }
                else {
                    --sce->refcnt; /* stat_cache_entry_free(sce); */
                    (*sptree)->data = sce = stat_cache_entry_init();
                    buffer_copy_string_len(&sce->name, name, len);
                }
            }
            sce->st = *st;
        }
        sce->stat_ts = log_monotonic_secs;
    }
}

void stat_cache_delete_entry(const char *name, uint32_t len)
{
    if (sc.stat_cache_engine == STAT_CACHE_ENGINE_NONE) return;
    force_assert(0 != len);
    if (name[len-1] == '/') { if (0 == --len) len = 1; }
    splay_tree **sptree = &sc.files;
    stat_cache_entry *sce = stat_cache_sptree_find(sptree, name, len);
    if (sce && buffer_is_equal_string(&sce->name, name, len)) {
        stat_cache_entry_free(sce);
        *sptree = splaytree_delete(*sptree, (*sptree)->key);
    }
}

void stat_cache_invalidate_entry(const char *name, uint32_t len)
{
    splay_tree **sptree = &sc.files;
    stat_cache_entry *sce = stat_cache_sptree_find(sptree, name, len);
    if (sce && buffer_is_equal_string(&sce->name, name, len)) {
        sce->stat_ts = 0;
      #ifdef HAVE_FAM_H
        if (sce->fam_dir != NULL) {
            --((fam_dir_entry *)sce->fam_dir)->refcnt;
            sce->fam_dir = NULL;
        }
      #endif
    }
}

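/* Note: stat_cache_invalidate_entry() above only marks an entry stale
 * (stat_ts = 0) and drops its reference on the monitored directory, so the
 * node stays in the tree and the next lookup re-stat()s the path;
 * stat_cache_delete_entry() removes the node outright.  The helpers below
 * apply the same invalidate/delete operations to every cached entry beneath
 * a directory prefix.
 */
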
#ifdef HAVE_FAM_H

static void stat_cache_invalidate_dir_tree_walk(splay_tree *t,
                                                const char *name, size_t len)
{
    if (t->left)  stat_cache_invalidate_dir_tree_walk(t->left,  name, len);
    if (t->right) stat_cache_invalidate_dir_tree_walk(t->right, name, len);

    const buffer * const b = &((stat_cache_entry *)t->data)->name;
    const size_t blen = buffer_clen(b);
    if (blen > len && b->ptr[len] == '/' && 0 == memcmp(b->ptr, name, len)) {
        stat_cache_entry *sce = t->data;
        sce->stat_ts = 0;
        if (sce->fam_dir != NULL) {
            --((fam_dir_entry *)sce->fam_dir)->refcnt;
            sce->fam_dir = NULL;
        }
    }
}

static void stat_cache_invalidate_dir_tree(const char *name, size_t len)
{
    splay_tree * const sptree = sc.files;
    if (sptree) stat_cache_invalidate_dir_tree_walk(sptree, name, len);
}

#endif

/*
 * walk through the splay_tree and collect contents of the dir tree.
 * remove tagged entries in a second loop
 */

static void stat_cache_tag_dir_tree(splay_tree *t, const char *name, size_t len,
                                    int *keys, int *ndx)
{
    if (*ndx == 8192) return; /*(must match num array entries in keys[])*/
    if (t->left)  stat_cache_tag_dir_tree(t->left,  name, len, keys, ndx);
    if (t->right) stat_cache_tag_dir_tree(t->right, name, len, keys, ndx);
    if (*ndx == 8192) return; /*(must match num array entries in keys[])*/

    const buffer * const b = &((stat_cache_entry *)t->data)->name;
    const size_t blen = buffer_clen(b);
    if (blen > len && b->ptr[len] == '/' && 0 == memcmp(b->ptr, name, len))
        keys[(*ndx)++] = t->key;
}

__attribute_noinline__
static void stat_cache_prune_dir_tree(const char *name, size_t len)
{
    splay_tree *sptree = sc.files;
    int max_ndx, i;
    int keys[8192]; /* 32k size on stack */
    do {
        if (!sptree) break;
        max_ndx = 0;
        stat_cache_tag_dir_tree(sptree, name, len, keys, &max_ndx);
        for (i = 0; i < max_ndx; ++i) {
            const int ndx = keys[i];
            splay_tree *node = sptree = splaytree_splay(sptree, ndx);
            if (node && node->key == ndx) {
                stat_cache_entry_free(node->data);
                sptree = splaytree_delete(sptree, ndx);
            }
        }
    } while (max_ndx == sizeof(keys)/sizeof(int));
    sc.files = sptree;
}

static void stat_cache_delete_tree(const char *name, uint32_t len)
{
    stat_cache_delete_entry(name, len);
    stat_cache_prune_dir_tree(name, len);
}

void stat_cache_delete_dir(const char *name, uint32_t len)
{
    force_assert(0 != len);
    if (name[len-1] == '/') { if (0 == --len) len = 1; }
    stat_cache_delete_tree(name, len);
  #ifdef HAVE_FAM_H
    if (sc.stat_cache_engine == STAT_CACHE_ENGINE_FAM) {
        splay_tree **sptree = &sc.scf->dirs;
        fam_dir_entry *fam_dir = stat_cache_sptree_find(sptree, name, len);
        if (fam_dir && buffer_eq_slen(&fam_dir->name, name, len))
            fam_dir_invalidate_node(fam_dir);
        if (*sptree) fam_dir_invalidate_tree(*sptree, name, len);
        fam_dir_periodic_cleanup();
    }
  #endif
}

/***
 *
 * returns:
 * - stat_cache_entry * on success (entry remains owned by the cache;
 *   see stat_cache_entry_refchg() to hold a reference)
 * - NULL on error, e.g. stat() failed -> see errno for problem
 */

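/* Typical lookup flow (hypothetical caller sketch; variable names invented):
 *
 *   stat_cache_entry * const sce = stat_cache_get_entry(&r->physical.path);
 *   if (NULL == sce) {
 *       // errno describes the failure (ENOENT, ENOTDIR, EACCES, ...)
 *   }
 *   else if (S_ISREG(sce->st.st_mode)) {
 *       // sce->st is at most ~1 sec old with the default "simple" engine
 *   }
 *
 * With the "simple" engine a cached result is reused within the same
 * monotonic second; with the fam/inotify/kqueue engine a monitored entry is
 * reused for up to 16 seconds unless an event invalidates it sooner.
 */
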
stat_cache_entry * stat_cache_get_entry(const buffer * const name) {
    stat_cache_entry *sce = NULL;

    /* consistency: ensure lookup name does not end in '/' unless root "/"
     * (but use full path given with stat(), even with trailing '/') */
    int final_slash = 0;
    size_t len = buffer_clen(name);
    force_assert(0 != len);
    if (name->ptr[len-1] == '/') { final_slash = 1; if (0 == --len) len = 1; }
    /* Note: paths are expected to be normalized before calling stat_cache,
     * e.g. without repeated '/' */

    if (name->ptr[0] != '/') {
        errno = EINVAL;
        return NULL;
    }

    /*
     * check if the directory for this file has changed
     */

    const unix_time64_t cur_ts = log_monotonic_secs;

    const int file_ndx = splaytree_djbhash(name->ptr, len);
    splay_tree *sptree = sc.files = splaytree_splay(sc.files, file_ndx);

    if (sptree && (sptree->key == file_ndx)) {
        /* we have seen this file already and
         * don't stat() it again in the same second */

        sce = sptree->data;

        /* check if the name is the same; we might have a collision */

        if (buffer_is_equal_string(&sce->name, name->ptr, len)) {
            if (sc.stat_cache_engine == STAT_CACHE_ENGINE_SIMPLE) {
                if (sce->stat_ts == cur_ts) {
                    if (final_slash && !S_ISDIR(sce->st.st_mode)) {
                        errno = ENOTDIR;
                        return NULL;
                    }
                    return sce;
                }
            }
          #ifdef HAVE_FAM_H
            else if (sc.stat_cache_engine == STAT_CACHE_ENGINE_FAM
                     && sce->fam_dir) { /* entry is in monitored dir */
                /* re-stat() periodically, even if monitoring for changes
                 * (due to limitations in stat_cache.c use of FAM)
                 * (gaps due to not continually monitoring an entire tree) */
                if (cur_ts - sce->stat_ts < 16) {
                    if (final_slash && !S_ISDIR(sce->st.st_mode)) {
                        errno = ENOTDIR;
                        return NULL;
                    }
                    return sce;
                }
            }
          #endif
        } else {
            /* collision; forget about the entry */
            sce = NULL;
        }
    }

    struct stat st;
    if (-1 == stat(name->ptr, &st)) {
        return NULL;
    }

    if (NULL == sce) {

        /* fix broken stat/open for symlinks to reg files with appended slash on freebsd,osx */
        if (final_slash && S_ISREG(st.st_mode)) {
            errno = ENOTDIR;
            return NULL;
        }

        sce = stat_cache_entry_init();
        buffer_copy_string_len(&sce->name, name->ptr, len);

        /* already splayed file_ndx */
        if (NULL != sptree && sptree->key == file_ndx) {
            /* hash collision: replace old entry */
            stat_cache_entry_free(sptree->data);
            sptree->data = sce;
        } else {
            /*sptree =*/ sc.files = splaytree_insert(sptree, file_ndx, sce);
        }

    } else {

        buffer_clear(&sce->etag);
      #if defined(HAVE_XATTR) || defined(HAVE_EXTATTR)
        buffer_clear(&sce->content_type);
      #endif

        /* close fd if file changed */
        if (sce->fd >= 0 && !stat_cache_stat_eq(&sce->st, &st)) {
            if (1 == sce->refcnt) {
                close(sce->fd);
                sce->fd = -1;
            }
            else {
                --sce->refcnt; /* stat_cache_entry_free(sce); */
                sptree->data = sce = stat_cache_entry_init();
                buffer_copy_string_len(&sce->name, name->ptr, len);
            }
        }
    }

    sce->st = st; /*(copy prior to calling fam_dir_monitor())*/

  #ifdef HAVE_FAM_H
    if (sc.stat_cache_engine == STAT_CACHE_ENGINE_FAM) {
        if (sce->fam_dir) --((fam_dir_entry *)sce->fam_dir)->refcnt;
        sce->fam_dir =
          fam_dir_monitor(sc.scf, name->ptr, len, &st);
      #if 0 /*(performed below)*/
        if (NULL != sce->fam_dir) {
            /*(may have been invalidated by dir change)*/
            sce->stat_ts = cur_ts;
        }
      #endif
    }
  #endif

    sce->stat_ts = cur_ts;
    return sce;
}

stat_cache_entry * stat_cache_get_entry_open(const buffer * const name, const int symlinks) {
    stat_cache_entry * const sce = stat_cache_get_entry(name);
    if (NULL == sce) return NULL;
    if (sce->fd >= 0) return sce;
    if (sce->st.st_size > 0) {
        sce->fd = stat_cache_open_rdonly_fstat(name, &sce->st, symlinks);
        buffer_clear(&sce->etag);
    }
    return sce; /* (note: sce->fd might still be -1 if open() failed) */
}

const stat_cache_st * stat_cache_path_stat (const buffer * const name) {
    const stat_cache_entry * const sce = stat_cache_get_entry(name);
    return sce ? &sce->st : NULL;
}

int stat_cache_path_isdir(const buffer *name) {
    const stat_cache_entry * const sce = stat_cache_get_entry(name);
    return (sce && (S_ISDIR(sce->st.st_mode) ? 1 : (errno = ENOTDIR, 0)));
}

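/* Example of the check performed by stat_cache_path_contains_symlink()
 * below: for a (hypothetical) path "/www/pages/index.html" it lstat()s
 * "/www/pages/index.html", then "/www/pages", then "/www", returning 1 as
 * soon as any component is a symlink, 0 if none is, and -1 on error (path
 * too long or lstat() failure).  "/" itself is assumed not to be a symlink.
 */
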
int stat_cache_path_contains_symlink(const buffer *name, log_error_st *errh) {
    /* caller should check for symlinks only if we should block symlinks. */

    /* catch the obvious symlinks
     *
     * this is not a secure check as we still have a race condition between
     * the stat() and the open.  We can only solve this by
     * 1. open() the file
     * 2. fstat() the fd
     *
     * and keeping the file open for the rest of the time.  But this can
     * only be done at network level.
     * */

  #ifdef HAVE_LSTAT
    /* we assume "/" cannot be a symlink,
     * so skip the symlink checks if path is "/" */
    size_t len = buffer_clen(name);
    force_assert(0 != len);
    force_assert(name->ptr[0] == '/');
    if (1 == len) return 0;
   #ifndef PATH_MAX
   #define PATH_MAX 4096
   #endif
    if (len >= PATH_MAX) return -1;

    char buf[PATH_MAX];
    memcpy(buf, name->ptr, len);
    char *s_cur = buf+len;
    do {
        *s_cur = '\0';
        struct stat st;
        if (0 == lstat(buf, &st)) {
            if (S_ISLNK(st.st_mode)) return 1;
        }
        else {
            log_perror(errh, __FILE__, __LINE__, "lstat failed for: %s", buf);
            return -1;
        }
    } while ((s_cur = strrchr(buf, '/')) > buf); /*(&buf[0]==buf; NULL < buf)*/
  #endif

    return 0;
}

int stat_cache_open_rdonly_fstat (const buffer *name, struct stat *st, int symlinks) {
    /*(Note: O_NOFOLLOW affects only the final path segment, the target file,
     * not any intermediate symlinks along the path)*/
    const int fd = fdevent_open_cloexec(name->ptr, symlinks, O_RDONLY, 0);
    if (fd >= 0) {
        if (0 == fstat(fd, st)) {
            return fd;
        } else {
            const int errnum = errno;
            close(fd);
            errno = errnum;
        }
    }
    return -1;
}

/**
 * remove entries from the stat cache which have not been stat()ed
 * for more than max_age seconds
 *
 * walk through the stat cache, collect the keys which are too old
 * and remove them in a second loop
 */

static void stat_cache_tag_old_entries(splay_tree * const t, int * const keys, int * const ndx, const time_t max_age, const unix_time64_t cur_ts) {
    if (*ndx == 8192) return; /*(must match num array entries in keys[])*/
    if (t->left)
        stat_cache_tag_old_entries(t->left, keys, ndx, max_age, cur_ts);
    if (t->right)
        stat_cache_tag_old_entries(t->right, keys, ndx, max_age, cur_ts);
    if (*ndx == 8192) return; /*(must match num array entries in keys[])*/

    const stat_cache_entry * const sce = t->data;
    if (cur_ts - sce->stat_ts > max_age)
        keys[(*ndx)++] = t->key;
}

static void stat_cache_periodic_cleanup(const time_t max_age, const unix_time64_t cur_ts) {
    splay_tree *sptree = sc.files;
    int max_ndx, i;
    int keys[8192]; /* 32k size on stack */
    do {
        if (!sptree) break;
        max_ndx = 0;
        stat_cache_tag_old_entries(sptree, keys, &max_ndx, max_age, cur_ts);
        for (i = 0; i < max_ndx; ++i) {
            int ndx = keys[i];
            sptree = splaytree_splay(sptree, ndx);
            if (sptree && sptree->key == ndx) {
                stat_cache_entry_free(sptree->data);
                sptree = splaytree_delete(sptree, ndx);
            }
        }
    } while (max_ndx == sizeof(keys)/sizeof(int));
    sc.files = sptree;
}

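/* stat_cache_trigger_cleanup() is intended to be called from the server's
 * periodic maintenance job (assumed here to run about once per second).
 * With the default engine it prunes entries not refreshed within the last
 * 2 seconds; with the fam/inotify/kqueue engine the pass runs only every
 * 32 seconds with a 32-second age limit, and unreferenced directory
 * monitors are released first so that directories referenced by entries
 * still in the cache remain monitored.
 */
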
void stat_cache_trigger_cleanup(void) {
    time_t max_age = 2;

  #ifdef HAVE_FAM_H
    if (STAT_CACHE_ENGINE_FAM == sc.stat_cache_engine) {
        if (log_monotonic_secs & 0x1F) return;
        /* once every 32 seconds (0x1F == 31) */
        max_age = 32;
        fam_dir_periodic_cleanup();
        /* By doing this before stat_cache_periodic_cleanup(),
         * entries used within the next max_age secs will remain
         * monitored, instead of effectively flushing and
         * rebuilding the FAM monitoring every max_age seconds */
    }
  #endif

    stat_cache_periodic_cleanup(max_age, log_monotonic_secs);
}