1 /*
2 * gw_backend - gateway backend code shared by dynamic socket backends
3 *
4 * Copyright(c) 2017 Glenn Strauss gstrauss()gluelogic.com All rights reserved
5 * License: BSD 3-clause (same as lighttpd)
6 */
7 #include "first.h"
8
9 #include "gw_backend.h"
10
11 #include <sys/types.h>
12 #include <sys/stat.h>
13 #include "sys-socket.h"
14 #ifdef HAVE_SYS_UIO_H
15 #include <sys/uio.h>
16 #endif
17 #ifdef HAVE_SYS_WAIT_H
18 #include <sys/wait.h>
19 #endif
20
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <limits.h>
24 #include <stdint.h>
25 #include <stdlib.h>
26 #include <signal.h>
27 #include <string.h>
28 #include <unistd.h>
29
30 #include "base.h"
31 #include "algo_md.h"
32 #include "array.h"
33 #include "buffer.h"
34 #include "chunk.h"
35 #include "fdevent.h"
36 #include "http_header.h"
37 #include "log.h"
38 #include "sock_addr.h"
39
40
41
42
/* Return pointer to the global stats counter keyed by
 * "gw.backend.<host-id>[.<proc-id>]<tag>" (creating it on first use).
 * tag includes its leading '.', e.g. ".load"; tlen excludes the '\0'. */
__attribute_noinline__
static int * gw_status_get_counter(gw_host *host, gw_proc *proc, const char *tag, size_t tlen) {
    /*(At the cost of some memory, could prepare strings for host and for proc
     * so that here we would copy ready made string for proc (or if NULL,
     * for host), and then append tag to produce key)*/
    char label[288];
    size_t llen = sizeof("gw.backend.")-1, len;
    memcpy(label, "gw.backend.", llen);

    len = buffer_clen(host->id);
    if (len) {
        force_assert(len < sizeof(label) - llen);
        memcpy(label+llen, host->id->ptr, len);
        llen += len;
    }

    if (proc) {
        /* append ".<proc-id>" for per-proc counters */
        force_assert(llen < sizeof(label) - (LI_ITOSTRING_LENGTH + 1));
        label[llen++] = '.';
        len = li_utostrn(label+llen, LI_ITOSTRING_LENGTH, proc->id);
        llen += len;
    }

    force_assert(tlen < sizeof(label) - llen);
    memcpy(label+llen, tag, tlen);
    llen += tlen;
    label[llen] = '\0';

    return plugin_stats_get_ptr(label, llen);
}
73
/* Increment the stats counter identified by tag for this host/proc. */
static void gw_proc_tag_inc(gw_host *host, gw_proc *proc, const char *tag, size_t len) {
    int * const counter = gw_status_get_counter(host, proc, tag, len);
    *counter += 1;
}
77
/* Bump the per-proc "gw.backend...connected" counter (cached pointer). */
static void gw_proc_connected_inc(gw_host *host, gw_proc *proc) {
    UNUSED(host);
    ++*proc->stats_connected;
}
82
/* Account one more in-flight request on proc and globally. */
static void gw_proc_load_inc(gw_host *host, gw_proc *proc) {
    ++proc->load;
    *proc->stats_load = proc->load;   /* "gw.backend...load" */
    ++*host->stats_global_active;     /* "gw.active-requests" */
}
87
/* Account one fewer in-flight request on proc and globally. */
static void gw_proc_load_dec(gw_host *host, gw_proc *proc) {
    --proc->load;
    *proc->stats_load = proc->load;   /* "gw.backend...load" */
    --*host->stats_global_active;     /* "gw.active-requests" */
}
92
/* Account one more request assigned to this host. */
static void gw_host_assign(gw_host *host) {
    ++host->load;
    *host->stats_load = host->load;   /* "gw.backend...load" */
}
96
/* Account one fewer request assigned to this host. */
static void gw_host_reset(gw_host *host) {
    --host->load;
    *host->stats_load = host->load;   /* "gw.backend...load" */
}
100
/* Zero this proc's stats counters and cache pointers to the two counters
 * updated on the hot path (".connected", ".load"). */
static void gw_status_init_proc(gw_host *host, gw_proc *proc) {
    *gw_status_get_counter(host, proc, CONST_STR_LEN(".disabled")) = 0;
    *gw_status_get_counter(host, proc, CONST_STR_LEN(".died")) = 0;
    *gw_status_get_counter(host, proc, CONST_STR_LEN(".overloaded")) = 0;
    proc->stats_connected =
      gw_status_get_counter(host, proc, CONST_STR_LEN(".connected"));
    proc->stats_load =
      gw_status_get_counter(host, proc, CONST_STR_LEN(".load"));
    *proc->stats_connected = 0;
    *proc->stats_load = 0;
}
112
/* Zero this host's load counter and cache pointers to the per-host load
 * counter and the shared global "gw.active-requests" counter. */
static void gw_status_init_host(gw_host *host) {
    host->stats_load =
      gw_status_get_counter(host, NULL, CONST_STR_LEN(".load"));
    *host->stats_load = 0;
    host->stats_global_active =
      plugin_stats_get_ptr(CONST_STR_LEN("gw.active-requests"));
}
120
121
122
123
124 __attribute_cold__
gw_proc_set_state(gw_host * host,gw_proc * proc,int state)125 static void gw_proc_set_state(gw_host *host, gw_proc *proc, int state) {
126 if ((int)proc->state == state) return;
127 if (proc->state == PROC_STATE_RUNNING) {
128 --host->active_procs;
129 } else if (state == PROC_STATE_RUNNING) {
130 ++host->active_procs;
131 }
132 proc->state = state;
133 }
134
135
136 __attribute_cold__
137 __attribute_noinline__
gw_proc_init_portpath(gw_host * host,gw_proc * proc)138 static void gw_proc_init_portpath(gw_host *host, gw_proc *proc) {
139 if (!host->unixsocket) {
140 proc->port = host->port + proc->id;
141 return;
142 }
143
144 if (!proc->unixsocket)
145 proc->unixsocket = buffer_init();
146
147 if (!host->bin_path)
148 buffer_copy_buffer(proc->unixsocket, host->unixsocket);
149 else {
150 buffer_clear(proc->unixsocket);
151 buffer_append_str2(proc->unixsocket, BUF_PTR_LEN(host->unixsocket),
152 CONST_STR_LEN("-"));
153 buffer_append_int(proc->unixsocket, proc->id);
154 }
155 }
156
157 __attribute_cold__
158 __attribute_noinline__
159 __attribute_returns_nonnull__
gw_proc_init(gw_host * host)160 static gw_proc *gw_proc_init(gw_host *host) {
161 gw_proc *proc = ck_calloc(1, sizeof(*proc));
162
163 /*proc->unixsocket = buffer_init();*//*(init on demand)*/
164 proc->connection_name = buffer_init();
165
166 proc->prev = NULL;
167 proc->next = NULL;
168 proc->state = PROC_STATE_DIED;
169
170 proc->id = host->max_id++;
171 gw_status_init_proc(host, proc); /*(proc->id must be set)*/
172 gw_proc_init_portpath(host, proc);
173
174 return proc;
175 }
176
177 __attribute_cold__
178 __attribute_noinline__
gw_proc_free(gw_proc * proc)179 static void gw_proc_free(gw_proc *proc) {
180 if (!proc) return;
181
182 gw_proc_free(proc->next);
183
184 buffer_free(proc->unixsocket);
185 buffer_free(proc->connection_name);
186 free(proc->saddr);
187
188 free(proc);
189 }
190
191 __attribute_malloc__
192 __attribute_returns_nonnull__
gw_host_init(void)193 static gw_host *gw_host_init(void) {
194 return ck_calloc(1, sizeof(gw_host));
195 }
196
/* Release one reference on h; free it (procs, argv, struct) only when the
 * last reference is dropped. */
static void gw_host_free(gw_host *h) {
    if (NULL == h) return;
    if (h->refcount) {
        /* still shared elsewhere; just drop this reference */
        --h->refcount;
        return;
    }

    gw_proc_free(h->first);
    gw_proc_free(h->unused_procs);

    for (uint32_t i = 0; i < h->args.used; ++i)
        free(h->args.ptr[i]);
    free(h->args.ptr);
    free(h);
}
211
212 __attribute_malloc__
213 __attribute_returns_nonnull__
gw_extensions_init(void)214 static gw_exts *gw_extensions_init(void) {
215 return ck_calloc(1, sizeof(gw_exts));
216 }
217
/* Free the extension registry: each extension's host references (dropped
 * via gw_host_free, which honors refcounts), host arrays, and the registry
 * itself.  (extension key storage is owned elsewhere and not freed here) */
static void gw_extensions_free(gw_exts *f) {
    if (NULL == f) return;
    for (uint32_t i = 0; i < f->used; ++i) {
        gw_extension * const fe = f->exts+i;
        for (uint32_t j = 0; j < fe->used; ++j)
            gw_host_free(fe->hosts[j]);
        free(fe->hosts);
    }
    free(f->exts);
    free(f);
}
230
/* Register host fh under extension key, creating the gw_extension entry
 * on first use of that key.  Returns 0.
 * NOTE: key is shallow-copied into fe->key and never freed by this module;
 * the key's storage must outlive the gw_exts structure. */
static int gw_extension_insert(gw_exts *ext, const buffer *key, gw_host *fh) {
    gw_extension *fe = NULL;
    /* look for an existing extension entry with this key */
    for (uint32_t i = 0; i < ext->used; ++i) {
        if (buffer_is_equal(key, &ext->exts[i].key)) {
            fe = ext->exts+i;
            break;
        }
    }

    if (NULL == fe) {
        /* grow exts array in blocks of 8; zero the fresh slots */
        if (!(ext->used & (8-1))) {
            ck_realloc_u32((void **)&ext->exts,ext->used,8,sizeof(*ext->exts));
            memset((void *)(ext->exts + ext->used), 0, 8 * sizeof(*ext->exts));
        }
        fe = ext->exts + ext->used++;
        fe->last_used_ndx = -1;
        /* cast away const on fe->key to shallow-copy key struct into place */
        buffer *b;
        *(const buffer **)&b = &fe->key;
        memcpy(b, key, sizeof(buffer)); /*(copy; not later free'd)*/
    }

    /* grow per-extension host list in blocks of 4 */
    if (!(fe->used & (4-1)))
        ck_realloc_u32((void **)&fe->hosts, fe->used, 4, sizeof(*fe->hosts));
    fe->hosts[fe->used++] = fh;
    return 0;
}
257
/* Record a successful connect to proc: bump ".connected" and refresh
 * the idle timestamp; optionally log at debug level. */
static void gw_proc_connect_success(gw_host *host, gw_proc *proc, int debug, request_st * const r) {
    gw_proc_connected_inc(host, proc); /*(".connected")*/
    proc->last_used = log_monotonic_secs;

    if (!debug) return;
    log_error(r->conf.errh, __FILE__, __LINE__,
      "got proc: pid: %d socket: %s load: %d",
      proc->pid, proc->connection_name->ptr, proc->load);
}
268
269 __attribute_cold__
gw_proc_connect_error(request_st * const r,gw_host * host,gw_proc * proc,pid_t pid,int errnum,int debug)270 static void gw_proc_connect_error(request_st * const r, gw_host *host, gw_proc *proc, pid_t pid, int errnum, int debug) {
271 const unix_time64_t cur_ts = log_monotonic_secs;
272 log_error_st * const errh = r->conf.errh;
273 errno = errnum; /*(for log_perror())*/
274 log_perror(errh, __FILE__, __LINE__,
275 "establishing connection failed: socket: %s", proc->connection_name->ptr);
276
277 if (!proc->is_local) {
278 proc->disabled_until = cur_ts + host->disable_time;
279 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
280 }
281 else if (proc->pid == pid && proc->state == PROC_STATE_RUNNING) {
282 /* several requests from lighttpd might reference the same proc
283 *
284 * Only one of them should mark the proc
285 * and all other ones should just take a new one.
286 *
287 * If a new proc was started with the old struct, this might
288 * otherwise lead to marking a perfectly good proc as dead
289 */
290 log_error(errh, __FILE__, __LINE__,
291 "backend error; we'll disable for %d"
292 "secs and send the request to another backend instead:"
293 "load: %d", host->disable_time, host->load);
294 if (EAGAIN == errnum) {
295 /* - EAGAIN: cool down the backend; it is overloaded */
296 #ifdef __linux__
297 log_error(errh, __FILE__, __LINE__,
298 "If this happened on Linux: You have run out of local ports. "
299 "Check the manual, section Performance how to handle this.");
300 #endif
301 if (debug) {
302 log_error(errh, __FILE__, __LINE__,
303 "This means that you have more incoming requests than your "
304 "FastCGI backend can handle in parallel. It might help to "
305 "spawn more FastCGI backends or PHP children; if not, "
306 "decrease server.max-connections. The load for this FastCGI "
307 "backend %s is %d", proc->connection_name->ptr, proc->load);
308 }
309 proc->disabled_until = cur_ts + host->disable_time;
310 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
311 }
312 else {
313 /* we got a hard error from the backend like
314 * - ECONNREFUSED for tcp-ip sockets
315 * - ENOENT for unix-domain-sockets
316 */
317 #if 0
318 gw_proc_set_state(host, proc, PROC_STATE_DIED_WAIT_FOR_PID);
319 #else /* treat as overloaded (future: unless we send kill() signal)*/
320 proc->disabled_until = cur_ts + host->disable_time;
321 gw_proc_set_state(host, proc, PROC_STATE_OVERLOADED);
322 #endif
323 }
324 }
325
326 if (EAGAIN == errnum) {
327 gw_proc_tag_inc(host, proc, CONST_STR_LEN(".overloaded"));
328 }
329 else {
330 gw_proc_tag_inc(host, proc, CONST_STR_LEN(".died"));
331 }
332 }
333
/* Release a request's hold on proc, decrementing its load counters. */
static void gw_proc_release(gw_host *host, gw_proc *proc, int debug, log_error_st *errh) {
    gw_proc_load_dec(host, proc);

    if (!debug) return;
    log_error(errh, __FILE__, __LINE__,
      "released proc: pid: %d socket: %s load: %u",
      proc->pid, proc->connection_name->ptr, proc->load);
}
343
344 __attribute_cold__
gw_proc_check_enable(gw_host * const host,gw_proc * const proc,log_error_st * const errh)345 static void gw_proc_check_enable(gw_host * const host, gw_proc * const proc, log_error_st * const errh) {
346 if (log_monotonic_secs <= proc->disabled_until) return;
347 if (proc->state != PROC_STATE_OVERLOADED) return;
348
349 gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
350
351 log_error(errh, __FILE__, __LINE__,
352 "gw-server re-enabled: %s %s %hu %s",
353 proc->connection_name->ptr,
354 host->host ? host->host->ptr : "", host->port,
355 host->unixsocket ? host->unixsocket->ptr : "");
356 }
357
358 __attribute_cold__
gw_proc_waitpid_log(const gw_host * const host,const gw_proc * const proc,log_error_st * const errh,const int status)359 static void gw_proc_waitpid_log(const gw_host * const host, const gw_proc * const proc, log_error_st * const errh, const int status) {
360 if (WIFEXITED(status)) {
361 if (proc->state != PROC_STATE_KILLED) {
362 log_error(errh, __FILE__, __LINE__,
363 "child exited: %d %s",
364 WEXITSTATUS(status), proc->connection_name->ptr);
365 }
366 } else if (WIFSIGNALED(status)) {
367 if (WTERMSIG(status) != SIGTERM && WTERMSIG(status) != SIGINT
368 && WTERMSIG(status) != host->kill_signal) {
369 log_error(errh, __FILE__, __LINE__,
370 "child signalled: %d", WTERMSIG(status));
371 }
372 } else {
373 log_error(errh, __FILE__, __LINE__,
374 "child died somehow: %d", status);
375 }
376 }
377
/* Reap a locally-spawned child proc if it has terminated.
 * Returns 0 if there is nothing to reap (remote proc, no pid, or child
 * still running); returns 1 after reaping (proc moves to PROC_STATE_DIED). */
static int gw_proc_waitpid(gw_host *host, gw_proc *proc, log_error_st *errh) {
    int rc, status;

    if (!proc->is_local) return 0;
    if (proc->pid <= 0) return 0;

    /* (3rd arg nonzero presumably selects non-blocking wait — see
     *  fdevent_waitpid; the "still running" branch below relies on it) */
    rc = fdevent_waitpid(proc->pid, &status, 1);
    if (0 == rc) return 0; /* child still running */

    /* child terminated */
    if (-1 == rc) {
        /* EINVAL or ECHILD no child processes */
        /* should not happen; someone else has cleaned up for us */
        log_perror(errh, __FILE__, __LINE__,
          "pid %d %d not found", proc->pid, proc->state);
    }
    else {
        gw_proc_waitpid_log(host, proc, errh, status);
    }

    proc->pid = 0;
    /* note time of death so respawn logic can throttle quick-exit loops
     * (skipped when we killed the proc on purpose) */
    if (proc->state != PROC_STATE_KILLED)
        proc->disabled_until = log_monotonic_secs;
    gw_proc_set_state(host, proc, PROC_STATE_DIED);
    return 1;
}
404
/* (Re)build proc->saddr/saddrlen and proc->connection_name
 * ("unix:<path>" or "tcp:<addr>:<port>") from host/proc configuration.
 * Returns 0 on success; -1 with errno=EINVAL if the address is invalid. */
__attribute_cold__
static int gw_proc_sockaddr_init(gw_host * const host, gw_proc * const proc, log_error_st * const errh) {
    sock_addr addr;
    socklen_t addrlen;

    if (proc->unixsocket) {
        if (1 != sock_addr_from_str_hints(&addr,&addrlen,proc->unixsocket->ptr,
                                          AF_UNIX, 0, errh)) {
            errno = EINVAL;
            return -1;
        }
        buffer_clear(proc->connection_name);
        buffer_append_str2(proc->connection_name,
                           CONST_STR_LEN("unix:"),
                           BUF_PTR_LEN(proc->unixsocket));
    }
    else {
      #ifdef __COVERITY__
        force_assert(host->host); /*(not NULL if !host->unixsocket)*/
      #endif
        /*(note: name resolution here is *blocking* if IP string not supplied)*/
        if (1 != sock_addr_from_str_hints(&addr, &addrlen, host->host->ptr,
                                          0, proc->port, errh)) {
            errno = EINVAL;
            return -1;
        }
        else if (host->host->size) {
            /*(skip if constant string set in gw_set_defaults_backend())*/
            /* overwrite host->host buffer with IP addr string so that
             * any further use of gw_host does not block on DNS lookup */
            buffer *h;                       /* cast away const on host->host */
            *(const buffer **)&h = host->host;
            sock_addr_inet_ntop_copy_buffer(h, &addr);
            host->family = sock_addr_get_family(&addr);
        }
        buffer_clear(proc->connection_name);
        buffer_append_str3(proc->connection_name,
                           CONST_STR_LEN("tcp:"),
                           BUF_PTR_LEN(host->host),
                           CONST_STR_LEN(":"));
        buffer_append_int(proc->connection_name, proc->port);
    }

    /* reuse existing saddr allocation when large enough */
    if (NULL != proc->saddr && proc->saddrlen < addrlen) {
        free(proc->saddr);
        proc->saddr = NULL;
    }
    if (NULL == proc->saddr) {
        proc->saddr = (struct sockaddr *)ck_malloc(addrlen);
    }
    proc->saddrlen = addrlen;
    memcpy(proc->saddr, &addr, addrlen);
    return 0;
}
459
/* Add "key=val" to env, replacing any existing entry with the same key.
 * Returns 0 on success, -1 if key or val is NULL. */
static int env_add(char_array *env, const char *key, size_t key_len, const char *val, size_t val_len) {
    if (NULL == key || NULL == val) return -1;

    /* assemble "KEY=VALUE" ('\0' copied along with the value) */
    char * const kv = ck_malloc(key_len + val_len + 2);
    memcpy(kv, key, key_len);
    kv[key_len] = '=';
    memcpy(kv + key_len + 1, val, val_len + 1);

    /* replace existing entry with the same "KEY=" prefix, if present */
    for (uint32_t i = 0; i < env->used; ++i) {
      #ifdef __COVERITY__
        force_assert(env->ptr); /*(non-NULL if env->used != 0)*/
      #endif
        if (0 == strncmp(kv, env->ptr[i], key_len + 1)) {
            free(env->ptr[i]);
            env->ptr[i] = kv;
            return 0;
        }
    }

    /* append; grow the list in blocks of 16 */
    if (!(env->used & (16-1)))
        ck_realloc_u32((void **)&env->ptr, env->used, 16, sizeof(*env->ptr));
    env->ptr[env->used++] = kv;

    return 0;
}
487
488 __attribute_cold__
gw_spawn_connection(gw_host * const host,gw_proc * const proc,log_error_st * const errh,int debug)489 static int gw_spawn_connection(gw_host * const host, gw_proc * const proc, log_error_st * const errh, int debug) {
490 int gw_fd;
491 int status;
492
493 if (debug) {
494 log_error(errh, __FILE__, __LINE__,
495 "new proc, socket: %hu %s",
496 proc->port, proc->unixsocket ? proc->unixsocket->ptr : "");
497 }
498
499 gw_fd = fdevent_socket_cloexec(proc->saddr->sa_family, SOCK_STREAM, 0);
500 if (-1 == gw_fd) {
501 log_perror(errh, __FILE__, __LINE__, "socket()");
502 return -1;
503 }
504
505 do {
506 status = connect(gw_fd, proc->saddr, proc->saddrlen);
507 } while (-1 == status && errno == EINTR);
508
509 if (-1 == status && errno != ENOENT && proc->unixsocket) {
510 log_perror(errh, __FILE__, __LINE__,
511 "connect %s", proc->unixsocket->ptr);
512 unlink(proc->unixsocket->ptr);
513 }
514
515 close(gw_fd);
516
517 if (-1 == status) {
518 /* server is not up, spawn it */
519 char_array env;
520 uint32_t i;
521
522 /* reopen socket */
523 gw_fd = fdevent_socket_cloexec(proc->saddr->sa_family, SOCK_STREAM, 0);
524 if (-1 == gw_fd) {
525 log_perror(errh, __FILE__, __LINE__, "socket()");
526 return -1;
527 }
528
529 if (fdevent_set_so_reuseaddr(gw_fd, 1) < 0) {
530 log_perror(errh, __FILE__, __LINE__, "socketsockopt()");
531 close(gw_fd);
532 return -1;
533 }
534
535 /* create socket */
536 if (-1 == bind(gw_fd, proc->saddr, proc->saddrlen)) {
537 log_perror(errh, __FILE__, __LINE__,
538 "bind failed for: %s", proc->connection_name->ptr);
539 close(gw_fd);
540 return -1;
541 }
542
543 if (-1 == listen(gw_fd, host->listen_backlog)) {
544 log_perror(errh, __FILE__, __LINE__, "listen()");
545 close(gw_fd);
546 return -1;
547 }
548
549 {
550 /* create environment */
551 env.ptr = NULL;
552 env.used = 0;
553
554 /* build clean environment */
555 if (host->bin_env_copy && host->bin_env_copy->used) {
556 for (i = 0; i < host->bin_env_copy->used; ++i) {
557 data_string *ds=(data_string *)host->bin_env_copy->data[i];
558 char *ge;
559
560 if (NULL != (ge = getenv(ds->value.ptr))) {
561 env_add(&env, BUF_PTR_LEN(&ds->value), ge, strlen(ge));
562 }
563 }
564 } else {
565 char ** const e = fdevent_environ();
566 for (i = 0; e[i]; ++i) {
567 char *eq;
568
569 if (NULL != (eq = strchr(e[i], '='))) {
570 env_add(&env, e[i], eq - e[i], eq+1, strlen(eq+1));
571 }
572 }
573 }
574
575 /* create environment */
576 if (host->bin_env) {
577 for (i = 0; i < host->bin_env->used; ++i) {
578 data_string *ds = (data_string *)host->bin_env->data[i];
579 env_add(&env, BUF_PTR_LEN(&ds->key),
580 BUF_PTR_LEN(&ds->value));
581 }
582 }
583
584 for (i = 0; i < env.used; ++i) {
585 /* search for PHP_FCGI_CHILDREN */
586 if (0 == strncmp(env.ptr[i], "PHP_FCGI_CHILDREN=",
587 sizeof("PHP_FCGI_CHILDREN=")-1)) {
588 break;
589 }
590 }
591
592 /* not found, add a default */
593 if (i == env.used) {
594 env_add(&env, CONST_STR_LEN("PHP_FCGI_CHILDREN"),
595 CONST_STR_LEN("1"));
596 }
597
598 if (!(env.used & (16-1)))
599 ck_realloc_u32((void **)&env.ptr,env.used,1,sizeof(*env.ptr));
600 env.ptr[env.used] = NULL;
601 }
602
603 int dfd = fdevent_open_dirname(host->args.ptr[0], 1);/*permit symlinks*/
604 if (-1 == dfd) {
605 log_perror(errh, __FILE__, __LINE__,
606 "open dirname failed: %s", host->args.ptr[0]);
607 }
608
609 /*(FCGI_LISTENSOCK_FILENO == STDIN_FILENO == 0)*/
610 proc->pid = (dfd >= 0)
611 ? fdevent_fork_execve(host->args.ptr[0], host->args.ptr,
612 env.ptr, gw_fd, -1, -1, dfd)
613 : -1;
614
615 for (i = 0; i < env.used; ++i) free(env.ptr[i]);
616 free(env.ptr);
617 if (-1 != dfd) close(dfd);
618 close(gw_fd);
619
620 if (-1 == proc->pid) {
621 log_error(errh, __FILE__, __LINE__,
622 "gw-backend failed to start: %s", host->bin_path->ptr);
623 proc->pid = 0;
624 proc->disabled_until = log_monotonic_secs;
625 return -1;
626 }
627
628 /* register process */
629 proc->last_used = log_monotonic_secs;
630 proc->is_local = 1;
631
632 /* wait */
633 struct timeval tv = { 0, 1000 };
634 select(0, NULL, NULL, NULL, &tv);
635
636 if (0 != gw_proc_waitpid(host, proc, errh)) {
637 log_error(errh, __FILE__, __LINE__,
638 "gw-backend failed to start: %s", host->bin_path->ptr);
639 log_error(errh, __FILE__, __LINE__,
640 "If you're trying to run your app as a FastCGI backend, make "
641 "sure you're using the FastCGI-enabled version. If this is PHP "
642 "on Gentoo, add 'fastcgi' to the USE flags. If this is PHP, try "
643 "removing the bytecode caches for now and try again.");
644 return -1;
645 }
646 } else {
647 proc->is_local = 0;
648 proc->pid = 0;
649
650 if (debug) {
651 log_error(errh, __FILE__, __LINE__,
652 "(debug) socket is already used; won't spawn: %s",
653 proc->connection_name->ptr);
654 }
655 }
656
657 gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
658 return 0;
659 }
660
/* Spawn one additional backend proc for host.  Reuses a dead gw_proc from
 * host->unused_procs when possible (but not if a proc exited within the
 * last second, to avoid respawn storms); otherwise allocates a new one.
 * On success the proc joins host->first; on spawn failure it is parked on
 * host->unused_procs (or freed outright if its sockaddr was invalid). */
__attribute_cold__
static void gw_proc_spawn(gw_host * const host, log_error_st * const errh, const int debug) {
    gw_proc *proc;
    for (proc = host->unused_procs; proc; proc = proc->next) {
        /* (proc->pid <= 0 indicates PROC_STATE_DIED, not PROC_STATE_KILLED) */
        if (proc->pid > 0) continue;
        /* (do not attempt to spawn another proc if a proc just exited) */
        if (proc->disabled_until >= log_monotonic_secs) return;
        break;
    }
    if (proc) {
        /* unlink proc from the unused_procs doubly-linked list */
        if (proc == host->unused_procs)
            host->unused_procs = proc->next;
        else
            proc->prev->next = proc->next;

        if (proc->next) {
            proc->next->prev = proc->prev;
            proc->next = NULL;
        }

        proc->prev = NULL;
        /* re-derive port/socket-path (proc->id may map differently now) */
        gw_proc_init_portpath(host, proc);
    } else {
        proc = gw_proc_init(host);
    }

    if (0 != gw_proc_sockaddr_init(host, proc, errh)) {
        /*(should not happen if host->host validated at startup,
         * and translated from name to IP address at startup)*/
        log_error(errh, __FILE__, __LINE__,
          "ERROR: spawning backend failed.");
        /* roll back the id allocation if this proc was the newest */
        if (proc->id == host->max_id-1) --host->max_id;
        gw_proc_free(proc);
    } else if (gw_spawn_connection(host, proc, errh, debug)) {
        log_error(errh, __FILE__, __LINE__,
          "ERROR: spawning backend failed.");
        /* return proc to the front of unused_procs for a later retry */
        proc->next = host->unused_procs;
        if (host->unused_procs)
            host->unused_procs->prev = proc;
        host->unused_procs = proc;
    } else {
        /* success: push proc onto the front of the active list */
        proc->next = host->first;
        if (host->first)
            host->first->prev = proc;
        host->first = proc;
        ++host->num_procs;
    }
}
710
/* Retire proc: move it from the active list (host->first) to
 * host->unused_procs, send it host->kill_signal, and mark it
 * PROC_STATE_KILLED.  (The child is reaped later via gw_proc_waitpid().) */
__attribute_cold__
static void gw_proc_kill(gw_host *host, gw_proc *proc) {
    /* unlink from the active list */
    if (proc->next) proc->next->prev = proc->prev;
    if (proc->prev) proc->prev->next = proc->next;
    else host->first = proc->next;
    --host->num_procs;

    /* push onto the front of the unused list */
    proc->prev = NULL;
    proc->next = host->unused_procs;
    /* (while KILLED, disabled_until doubles as the signal-escalation
     *  counter incremented in gw_restart_dead_proc()) */
    proc->disabled_until = 0;

    if (host->unused_procs)
        host->unused_procs->prev = proc;
    host->unused_procs = proc;

    kill(proc->pid, host->kill_signal);

    gw_proc_set_state(host, proc, PROC_STATE_KILLED);
}
730
/* Scan all config contexts for an already-configured locally-spawned host
 * (host->bin_path set) that uses the same unix socket path.
 * Returns the duplicate host, or NULL if none. */
__attribute_pure__
static gw_host * unixsocket_is_dup(gw_plugin_data *p, const buffer *unixsocket) {
    if (NULL == p->cvlist) return NULL;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        /* locate the parsed "xxxxx.server" directive in this context */
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        /* walk every host of every extension in this context */
        gw_exts *exts = conf->exts;
        for (uint32_t j = 0; j < exts->used; ++j) {
            gw_extension *ex = exts->exts+j;
            for (uint32_t n = 0; n < ex->used; ++n) {
                gw_host *host = ex->hosts[n];
                if (host->unixsocket
                    && buffer_is_equal(host->unixsocket, unixsocket)
                    && host->bin_path)
                    return host;
            }
        }
    }

    return NULL;
}
765
/* Tokenize command line b on spaces/tabs into env->ptr[] (argv-style array
 * of strdup'd strings, NULL-terminated).  b->ptr is temporarily modified
 * (a '\0' is inserted and then restored) while copying each argument. */
static void parse_binpath(char_array *env, const buffer *b) {
    char *start = b->ptr;
    char c;
    /* search for spaces */
    for (size_t i = 0, used = buffer_clen(b); i < used; ++i) {
        switch(b->ptr[i]) {
          case ' ':
          case '\t':
            /* a WS, stop here and copy the argument */

            /* grow list in blocks of 4 */
            if (!(env->used & (4-1)))
                ck_realloc_u32((void**)&env->ptr,env->used,4,sizeof(*env->ptr));

            c = b->ptr[i];
            b->ptr[i] = '\0';
            env->ptr[env->used++] = strdup(start);
            b->ptr[i] = c;

            start = b->ptr + i + 1;
            break;
          default:
            break;
        }
    }

    /* ensure room for the final argument plus the terminating NULL */
    if (!(env->used & (4-1)) || !((env->used+1) & (4-1)))
        ck_realloc_u32((void **)&env->ptr, env->used, 2, sizeof(*env->ptr));
    env->ptr[env->used++] = strdup(start);
    env->ptr[env->used] = NULL;
}
796
/* load-balancing strategies for choosing among multiple backend hosts
 * (see gw_host_get()) */
enum {
    GW_BALANCE_LEAST_CONNECTION, /* host with the lowest current load */
    GW_BALANCE_RR,               /* round-robin over active hosts */
    GW_BALANCE_HASH,             /* hash of request URI (path + authority) */
    GW_BALANCE_STICKY            /* hash of client address (source sticky) */
};
803
/* Hash helper wrapping djbhash() so the underlying hash function can be
 * swapped in one place; chainable via the hash accumulator argument. */
__attribute_noinline__
__attribute_pure__
static uint32_t
gw_hash(const char *str, const uint32_t len, uint32_t hash)
{
    return djbhash(str, len, hash);
}
811
/* Select a backend host from extension->hosts[] using the configured
 * balancing strategy; only hosts with active_procs > 0 are candidates.
 * Returns the chosen host, or NULL if none is available (in which case
 * r->http_status is set to 503).  In single-worker mode, may attempt
 * adaptive spawning for hosts configured with min_procs == 0. */
static gw_host * gw_host_get(request_st * const r, gw_extension *extension, int balance, int debug) {
    int ndx = -1;
    const int ext_used = (int)extension->used;

    if (ext_used <= 1) {
        /* common case: zero or one host configured for this extension */
        if (1 == ext_used && extension->hosts[0]->active_procs > 0)
            ndx = 0;
    }
    else {
        /*const char *balancing = "";*/
        switch(balance) {
        case GW_BALANCE_HASH:
        { /* hash balancing */
            /* choose the host maximizing (hash(uri) ^ host->gw_hash)
             * (rendezvous-style selection keyed on the request URI) */
            const uint32_t base_hash =
              gw_hash(BUF_PTR_LEN(&r->uri.authority),
                      gw_hash(BUF_PTR_LEN(&r->uri.path), DJBHASH_INIT));
            uint32_t last_max = UINT32_MAX;
            for (int k = 0; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                const uint32_t cur_max = base_hash ^ host->gw_hash;
              #if 0
                if (debug) {
                    log_error(r->conf.errh, __FILE__, __LINE__,
                      "proxy - election: %s %s %s %u", r->uri.path.ptr,
                      host->host ? host->host->ptr : "",
                      r->uri.authority.ptr, cur_max);
                }
              #endif
                /* (last_max == UINT32_MAX means "no candidate chosen yet") */
                if (last_max < cur_max || last_max == UINT32_MAX) {
                    last_max = cur_max;
                    ndx = k;
                }
            }
            /*balancing = "hash";*/
            break;
        }
        case GW_BALANCE_LEAST_CONNECTION:
        { /* fair balancing */
            /* choose the active host with the lowest current load */
            for (int k = 0, max_usage = INT_MAX; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                if (host->load < max_usage) {
                    max_usage = host->load;
                    ndx = k;
                }
            }
            /*balancing = "least connection";*/
            break;
        }
        case GW_BALANCE_RR:
        { /* round robin */
            const gw_host *host = extension->hosts[0];

            /* Use last_used_ndx from first host in list */
            int k = extension->last_used_ndx;
            ndx = k + 1; /* use next host after the last one */
            if (ndx < 0) ndx = 0;

            /* Search first active host after last_used_ndx */
            while (ndx < ext_used
                   && 0 == (host = extension->hosts[ndx])->active_procs) ++ndx;

            if (ndx >= ext_used) {
                /* didn't find a higher id, wrap to the start */
                for (ndx = 0; ndx <= (int) k; ++ndx) {
                    host = extension->hosts[ndx];
                    if (0 != host->active_procs) break;
                }

                /* No active host found */
                if (0 == host->active_procs) ndx = -1;
            }

            /* Save new index for next round */
            extension->last_used_ndx = ndx;

            /*balancing = "round-robin";*/
            break;
        }
        case GW_BALANCE_STICKY:
        { /* source sticky balancing */
            /* hash the client address so a given client keeps mapping to
             * the same backend host */
            const buffer * const dst_addr_buf = r->dst_addr_buf;
            const uint32_t base_hash =
              gw_hash(BUF_PTR_LEN(dst_addr_buf), DJBHASH_INIT);
            uint32_t last_max = UINT32_MAX;
            for (int k = 0; k < ext_used; ++k) {
                const gw_host * const host = extension->hosts[k];
                if (0 == host->active_procs) continue;
                const uint32_t cur_max = base_hash ^ host->gw_hash ^ host->port;
              #if 0
                if (debug) {
                    log_error(r->conf.errh, __FILE__, __LINE__,
                      "proxy - election: %s %s %hu %u", dst_addr_buf->ptr,
                      host->host ? host->host->ptr : "",
                      host->port, cur_max);
                }
              #endif
                if (last_max < cur_max || last_max == UINT32_MAX) {
                    last_max = cur_max;
                    ndx = k;
                }
            }
            /*balancing = "sticky";*/
            break;
        }
        default:
            break;
        }
      #if 0
        if (debug) {
            log_error(r->conf.errh, __FILE__, __LINE__,
              "gw - balancing: %s, hosts: %d", balancing, ext_used);
        }
      #endif
    }

    if (__builtin_expect( (-1 != ndx), 1)) {
        /* found a server */

        if (debug) {
            gw_host * const host = extension->hosts[ndx];
            log_error(r->conf.errh, __FILE__, __LINE__,
              "gw - found a host %s %hu",
              host->host ? host->host->ptr : "", host->port);
            return host;
        }

        return extension->hosts[ndx];
    } else if (0 == r->con->srv->srvconf.max_worker) {
        /* special-case adaptive spawning and 0 == host->min_procs */
        for (int k = 0; k < ext_used; ++k) {
            gw_host * const host = extension->hosts[k];
            if (0 == host->min_procs && 0 == host->num_procs && host->bin_path){
                gw_proc_spawn(host, r->con->srv->errh, debug);
                if (host->num_procs) return host;
            }
        }
    }

    /* all hosts are down */
    /* sorry, we don't have a server alive for this ext */
    r->http_status = 503; /* Service Unavailable */
    r->handler_module = NULL;

    /* only send the 'no handler' once */
    if (!extension->note_is_sent) {
        extension->note_is_sent = 1;
        log_error(r->conf.errh, __FILE__, __LINE__,
          "all handlers for %s?%.*s on %s are down.",
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query),
          extension->key.ptr);
    }

    return NULL;
}
968
/* Initiate a connect() on gw_fd to proc's address.
 * Returns 0 if connected, 1 if the connect is in progress (caller polls
 * for completion), -1 on hard error (proc marked via connect_error). */
static int gw_establish_connection(request_st * const r, gw_host *host, gw_proc *proc, pid_t pid, int gw_fd, int debug) {
    if (-1 == connect(gw_fd, proc->saddr, proc->saddrlen)) {
        const int errnum = errno;
        const int in_progress =
             (errnum == EINPROGRESS || errnum == EALREADY || errnum == EINTR
              || (errnum == EAGAIN && host->unixsocket));
        if (!in_progress) {
            gw_proc_connect_error(r, host, proc, pid, errnum, debug);
            return -1;
        }
        if (debug > 2) {
            log_error(r->conf.errh, __FILE__, __LINE__,
              "connect delayed; will continue later: %s",
              proc->connection_name->ptr);
        }
        return 1;
    }

    if (debug > 1) {
        log_error(r->conf.errh, __FILE__, __LINE__,
          "connect succeeded: %d", gw_fd);
    }

    return 0;
}
994
/* Per-proc maintenance: re-enable overloaded procs whose cool-down elapsed,
 * escalate kill signals for procs that refuse to die, and restart dead
 * locally-spawned procs (host->bin_path set) once no request still
 * references them.  trigger is nonzero when invoked from the periodic
 * (per-second) trigger rather than opportunistically. */
__attribute_cold__
__attribute_noinline__
static void gw_restart_dead_proc(gw_host * const host, log_error_st * const errh, const int debug, const int trigger, gw_proc * const proc) {
    switch (proc->state) {
      case PROC_STATE_RUNNING:
        break;
      case PROC_STATE_OVERLOADED:
        gw_proc_check_enable(host, proc, errh);
        break;
      case PROC_STATE_KILLED:
        /* while KILLED, disabled_until counts trigger ticks since kill();
         * after 4 ticks resend kill_signal, after 8 escalate to SIGTERM,
         * after 16 to SIGKILL */
        if (trigger && ++proc->disabled_until > 4) {
            int sig = (proc->disabled_until <= 8)
              ? host->kill_signal
              : proc->disabled_until <= 16 ? SIGTERM : SIGKILL;
            kill(proc->pid, sig);
        }
        break;
      case PROC_STATE_DIED_WAIT_FOR_PID:
        /*(state should not happen in workers if server.max-worker > 0)*/
        /*(if PROC_STATE_DIED_WAIT_FOR_PID is used in future, might want
         * to save proc->disabled_until before gw_proc_waitpid() since
         * gw_proc_waitpid will set proc->disabled_until=log_monotonic_secs,
         * and so process will not be restarted below until one sec later)*/
        if (0 == gw_proc_waitpid(host, proc, errh)) {
            gw_proc_check_enable(host, proc, errh);
        }

        if (proc->state != PROC_STATE_DIED) break;
        __attribute_fallthrough__/*(we have a dead proc now)*/

      case PROC_STATE_DIED:
        /* local procs get restarted by us,
         * remote ones hopefully by the admin */

        if (host->bin_path) {
            /* we still have connections bound to this proc,
             * let them terminate first */
            if (proc->load != 0) break;

            /* avoid spinning if child exits too quickly */
            if (proc->disabled_until >= log_monotonic_secs) break;

            /* restart the child */

            if (debug) {
                log_error(errh, __FILE__, __LINE__,
                  "--- gw spawning"
                  "\n\tsocket %s"
                  "\n\tcurrent: 1 / %u",
                  proc->connection_name->ptr, host->max_procs);
            }

            if (gw_spawn_connection(host, proc, errh, debug)) {
                log_error(errh, __FILE__, __LINE__,
                  "ERROR: spawning gw failed.");
            }
        } else {
            gw_proc_check_enable(host, proc, errh);
        }
        break;
    }
}
1057
/* Walk every proc of host and run the recovery state machine on any proc
 * that is not currently RUNNING. */
static void gw_restart_dead_procs(gw_host * const host, log_error_st * const errh, const int debug, const int trigger) {
    gw_proc *proc = host->first;
    for (; NULL != proc; proc = proc->next) {
        if (debug > 2) {
            log_error(errh, __FILE__, __LINE__,
              "proc: %s %d %d %d %d", proc->connection_name->ptr,
              proc->state, proc->is_local, proc->load, proc->pid);
        }
        if (PROC_STATE_RUNNING == proc->state) continue;
        gw_restart_dead_proc(host, errh, debug, trigger, proc);
    }
}
1069
1070
1071
1072
1073 #include "base.h"
1074 #include "response.h"
1075
1076
1077 /* ok, we need a prototype */
1078 static handler_t gw_handle_fdevent(void *ctx, int revents);
1079 static handler_t gw_process_fdevent(gw_handler_ctx *hctx, request_st *r, int revents);
1080
1081
1082 __attribute_returns_nonnull__
handler_ctx_init(size_t sz)1083 static gw_handler_ctx * handler_ctx_init(size_t sz) {
1084 gw_handler_ctx *hctx = ck_calloc(1, 0 == sz ? sizeof(*hctx) : sz);
1085
1086 /*hctx->response = chunk_buffer_acquire();*//*(allocated when needed)*/
1087
1088 hctx->request_id = 0;
1089 hctx->gw_mode = GW_RESPONDER;
1090 hctx->state = GW_STATE_INIT;
1091 hctx->proc = NULL;
1092
1093 hctx->fd = -1;
1094
1095 hctx->reconnects = 0;
1096 hctx->send_content_body = 1;
1097
1098 /*hctx->rb = chunkqueue_init();*//*(allocated when needed)*/
1099 chunkqueue_init(&hctx->wb);
1100 hctx->wb_reqlen = 0;
1101
1102 return hctx;
1103 }
1104
handler_ctx_free(gw_handler_ctx * hctx)1105 static void handler_ctx_free(gw_handler_ctx *hctx) {
1106 /* caller MUST have called gw_backend_close(hctx, r) if necessary */
1107 if (hctx->handler_ctx_free) hctx->handler_ctx_free(hctx);
1108 chunk_buffer_release(hctx->response);
1109
1110 if (hctx->rb) chunkqueue_free(hctx->rb);
1111 chunkqueue_reset(&hctx->wb);
1112
1113 free(hctx);
1114 }
1115
handler_ctx_clear(gw_handler_ctx * hctx)1116 static void handler_ctx_clear(gw_handler_ctx *hctx) {
1117 /* caller MUST have called gw_backend_close(hctx, r) if necessary */
1118
1119 hctx->proc = NULL;
1120 hctx->host = NULL;
1121 hctx->ext = NULL;
1122 /*hctx->ext_auth is intentionally preserved to flag prior authorizer*/
1123
1124 hctx->gw_mode = GW_RESPONDER;
1125 hctx->state = GW_STATE_INIT;
1126 /*hctx->state_timestamp = 0;*//*(unused; left as-is)*/
1127
1128 if (hctx->rb) chunkqueue_reset(hctx->rb);
1129 chunkqueue_reset(&hctx->wb);
1130 hctx->wb_reqlen = 0;
1131
1132 if (hctx->response) buffer_clear(hctx->response);
1133
1134 hctx->fd = -1;
1135 hctx->reconnects = 0;
1136 hctx->request_id = 0;
1137 hctx->send_content_body = 1;
1138
1139 /*plugin_config conf;*//*(no need to reset for same request)*/
1140
1141 /*hctx->r = NULL;*//*(no need to reset for same request)*/
1142 /*hctx->plugin_data = NULL;*//*(no need to reset for same request)*/
1143 }
1144
1145
gw_init(void)1146 void * gw_init(void) {
1147 return ck_calloc(1, sizeof(gw_plugin_data));
1148 }
1149
1150
gw_plugin_config_free(gw_plugin_config * s)1151 void gw_plugin_config_free(gw_plugin_config *s) {
1152 gw_exts *exts = s->exts;
1153 if (exts) {
1154 for (uint32_t j = 0; j < exts->used; ++j) {
1155 gw_extension *ex = exts->exts+j;
1156 for (uint32_t n = 0; n < ex->used; ++n) {
1157 gw_proc *proc;
1158 gw_host *host = ex->hosts[n];
1159
1160 for (proc = host->first; proc; proc = proc->next) {
1161 if (proc->pid > 0) {
1162 kill(proc->pid, host->kill_signal);
1163 }
1164
1165 if (proc->is_local && proc->unixsocket) {
1166 unlink(proc->unixsocket->ptr);
1167 }
1168 }
1169
1170 for (proc = host->unused_procs; proc; proc = proc->next) {
1171 if (proc->pid > 0) {
1172 kill(proc->pid, host->kill_signal);
1173 }
1174 if (proc->is_local && proc->unixsocket) {
1175 unlink(proc->unixsocket->ptr);
1176 }
1177 }
1178 }
1179 }
1180
1181 gw_extensions_free(s->exts);
1182 gw_extensions_free(s->exts_auth);
1183 gw_extensions_free(s->exts_resp);
1184 }
1185 free(s);
1186 }
1187
gw_free(void * p_d)1188 void gw_free(void *p_d) {
1189 gw_plugin_data * const p = p_d;
1190 if (NULL == p->cvlist) return;
1191 /* (init i to 0 if global context; to 1 to skip empty global context) */
1192 for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
1193 config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
1194 for (; -1 != cpv->k_id; ++cpv) {
1195 switch (cpv->k_id) {
1196 case 0: /* xxxxx.server */
1197 if (cpv->vtype == T_CONFIG_LOCAL)
1198 gw_plugin_config_free(cpv->v.v);
1199 break;
1200 default:
1201 break;
1202 }
1203 }
1204 }
1205 }
1206
gw_exts_clear_check_local(gw_exts * exts)1207 void gw_exts_clear_check_local(gw_exts *exts) {
1208 for (uint32_t j = 0; j < exts->used; ++j) {
1209 gw_extension *ex = exts->exts+j;
1210 for (uint32_t n = 0; n < ex->used; ++n) {
1211 ex->hosts[n]->check_local = 0;
1212 }
1213 }
1214 }
1215
gw_set_defaults_backend(server * srv,gw_plugin_data * p,const array * a,gw_plugin_config * s,int sh_exec,const char * cpkkey)1216 int gw_set_defaults_backend(server *srv, gw_plugin_data *p, const array *a, gw_plugin_config *s, int sh_exec, const char *cpkkey) {
1217 /* per-module plugin_config MUST have common "base class" gw_plugin_config*/
1218 /* per-module plugin_data MUST have pointer-compatible common "base class"
1219 * with gw_plugin_data (stemming from gw_plugin_config compatibility) */
1220
1221 static const config_plugin_keys_t cpk[] = {
1222 { CONST_STR_LEN("host"),
1223 T_CONFIG_STRING,
1224 T_CONFIG_SCOPE_CONNECTION }
1225 ,{ CONST_STR_LEN("port"),
1226 T_CONFIG_SHORT,
1227 T_CONFIG_SCOPE_CONNECTION }
1228 ,{ CONST_STR_LEN("socket"),
1229 T_CONFIG_STRING,
1230 T_CONFIG_SCOPE_CONNECTION }
1231 ,{ CONST_STR_LEN("listen-backlog"),
1232 T_CONFIG_INT,
1233 T_CONFIG_SCOPE_CONNECTION }
1234 ,{ CONST_STR_LEN("bin-path"),
1235 T_CONFIG_STRING,
1236 T_CONFIG_SCOPE_CONNECTION }
1237 ,{ CONST_STR_LEN("kill-signal"),
1238 T_CONFIG_SHORT,
1239 T_CONFIG_SCOPE_CONNECTION }
1240 ,{ CONST_STR_LEN("check-local"),
1241 T_CONFIG_BOOL,
1242 T_CONFIG_SCOPE_CONNECTION }
1243 ,{ CONST_STR_LEN("mode"),
1244 T_CONFIG_STRING,
1245 T_CONFIG_SCOPE_CONNECTION }
1246 ,{ CONST_STR_LEN("docroot"),
1247 T_CONFIG_STRING,
1248 T_CONFIG_SCOPE_CONNECTION }
1249 ,{ CONST_STR_LEN("min-procs"),
1250 T_CONFIG_SHORT,
1251 T_CONFIG_SCOPE_CONNECTION }
1252 ,{ CONST_STR_LEN("max-procs"),
1253 T_CONFIG_SHORT,
1254 T_CONFIG_SCOPE_CONNECTION }
1255 ,{ CONST_STR_LEN("max-load-per-proc"),
1256 T_CONFIG_SHORT,
1257 T_CONFIG_SCOPE_CONNECTION }
1258 ,{ CONST_STR_LEN("idle-timeout"),
1259 T_CONFIG_SHORT,
1260 T_CONFIG_SCOPE_CONNECTION }
1261 ,{ CONST_STR_LEN("disable-time"),
1262 T_CONFIG_SHORT,
1263 T_CONFIG_SCOPE_CONNECTION }
1264 ,{ CONST_STR_LEN("bin-environment"),
1265 T_CONFIG_ARRAY_KVSTRING,
1266 T_CONFIG_SCOPE_CONNECTION }
1267 ,{ CONST_STR_LEN("bin-copy-environment"),
1268 T_CONFIG_ARRAY_VLIST,
1269 T_CONFIG_SCOPE_CONNECTION }
1270 ,{ CONST_STR_LEN("broken-scriptfilename"),
1271 T_CONFIG_BOOL,
1272 T_CONFIG_SCOPE_CONNECTION }
1273 ,{ CONST_STR_LEN("strip-request-uri"),
1274 T_CONFIG_STRING,
1275 T_CONFIG_SCOPE_CONNECTION }
1276 ,{ CONST_STR_LEN("fix-root-scriptname"),
1277 T_CONFIG_BOOL,
1278 T_CONFIG_SCOPE_CONNECTION }
1279 ,{ CONST_STR_LEN("allow-x-send-file"),
1280 T_CONFIG_BOOL,
1281 T_CONFIG_SCOPE_CONNECTION }
1282 ,{ CONST_STR_LEN("x-sendfile"),
1283 T_CONFIG_BOOL,
1284 T_CONFIG_SCOPE_CONNECTION }
1285 ,{ CONST_STR_LEN("x-sendfile-docroot"),
1286 T_CONFIG_ARRAY_VLIST,
1287 T_CONFIG_SCOPE_CONNECTION }
1288 ,{ CONST_STR_LEN("tcp-fin-propagate"),
1289 T_CONFIG_BOOL,
1290 T_CONFIG_SCOPE_CONNECTION }
1291 ,{ CONST_STR_LEN("connect-timeout"),
1292 T_CONFIG_INT,
1293 T_CONFIG_SCOPE_CONNECTION }
1294 ,{ CONST_STR_LEN("write-timeout"),
1295 T_CONFIG_INT,
1296 T_CONFIG_SCOPE_CONNECTION }
1297 ,{ CONST_STR_LEN("read-timeout"),
1298 T_CONFIG_INT,
1299 T_CONFIG_SCOPE_CONNECTION }
1300 ,{ NULL, 0,
1301 T_CONFIG_UNSET,
1302 T_CONFIG_SCOPE_UNSET }
1303 };
1304
1305 gw_host *host = NULL;
1306
1307 int graceful_restart_bg =
1308 config_feature_bool(srv, "server.graceful-restart-bg", 0);
1309
1310 p->srv_pid = srv->pid;
1311
1312 s->exts = gw_extensions_init();
1313 s->exts_auth = gw_extensions_init();
1314 s->exts_resp = gw_extensions_init();
1315 /*s->balance = GW_BALANCE_LEAST_CONNECTION;*//*(default)*/
1316
1317 /*
1318 * gw.server = ( "<ext>" => ( ... ),
1319 * "<ext>" => ( ... ) )
1320 */
1321
1322 for (uint32_t j = 0; j < a->used; ++j) {
1323 data_array *da_ext = (data_array *)a->data[j];
1324
1325 /*
1326 * da_ext->key == name of the extension
1327 */
1328
1329 /*
1330 * gw.server = ( "<ext>" =>
1331 * ( "<host>" => ( ... ),
1332 * "<host>" => ( ... )
1333 * ),
1334 * "<ext>" => ... )
1335 */
1336
1337 for (uint32_t n = 0; n < da_ext->value.used; ++n) {
1338 data_array * const da_host = (data_array *)da_ext->value.data[n];
1339
1340 if (da_host->type != TYPE_ARRAY
1341 || !array_is_kvany(&da_host->value)){
1342 log_error(srv->errh, __FILE__, __LINE__,
1343 "unexpected value for gw.server near [%s](string); "
1344 "expected ( \"ext\" => "
1345 "( \"backend-label\" => ( \"key\" => \"value\" )))",
1346 da_host->key.ptr ? da_host->key.ptr : "");
1347 goto error;
1348 }
1349
1350 config_plugin_value_t cvlist[sizeof(cpk)/sizeof(cpk[0])+1];
1351 memset(cvlist, 0, sizeof(cvlist));
1352
1353 array *ca = &da_host->value;
1354 if (!config_plugin_values_init_block(srv, ca, cpk, cpkkey, cvlist))
1355 goto error;
1356
1357 unsigned short host_mode = GW_RESPONDER;
1358
1359 host = gw_host_init();
1360 host->id = &da_host->key;
1361 host->check_local = 1;
1362 host->min_procs = 4;
1363 host->max_procs = 4;
1364 host->max_load_per_proc = 1;
1365 host->idle_timeout = 60;
1366 host->connect_timeout = 8;
1367 host->disable_time = 1;
1368 host->break_scriptfilename_for_php = 0;
1369 host->kill_signal = SIGTERM;
1370 host->fix_root_path_name = 0;
1371 host->listen_backlog = 1024;
1372 host->xsendfile_allow = 0;
1373 host->refcount = 0;
1374
1375 config_plugin_value_t *cpv = cvlist;
1376 for (; -1 != cpv->k_id; ++cpv) {
1377 switch (cpv->k_id) {
1378 case 0: /* host */
1379 if (!buffer_is_blank(cpv->v.b))
1380 host->host = cpv->v.b;
1381 break;
1382 case 1: /* port */
1383 host->port = cpv->v.shrt;
1384 break;
1385 case 2: /* socket */
1386 if (!buffer_is_blank(cpv->v.b))
1387 host->unixsocket = cpv->v.b;
1388 break;
1389 case 3: /* listen-backlog */
1390 host->listen_backlog = cpv->v.u;
1391 break;
1392 case 4: /* bin-path */
1393 if (!buffer_is_blank(cpv->v.b))
1394 host->bin_path = cpv->v.b;
1395 break;
1396 case 5: /* kill-signal */
1397 host->kill_signal = cpv->v.shrt;
1398 break;
1399 case 6: /* check-local */
1400 host->check_local = (0 != cpv->v.u);
1401 break;
1402 case 7: /* mode */
1403 if (!buffer_is_blank(cpv->v.b)) {
1404 const buffer *b = cpv->v.b;
1405 if (buffer_eq_slen(b, CONST_STR_LEN("responder")))
1406 host_mode = GW_RESPONDER;
1407 else if (buffer_eq_slen(b, CONST_STR_LEN("authorizer")))
1408 host_mode = GW_AUTHORIZER;
1409 else
1410 log_error(srv->errh, __FILE__, __LINE__,
1411 "WARNING: unknown gw mode: %s "
1412 "(ignored, mode set to responder)", b->ptr);
1413 }
1414 break;
1415 case 8: /* docroot */
1416 if (!buffer_is_blank(cpv->v.b))
1417 host->docroot = cpv->v.b;
1418 break;
1419 case 9: /* min-procs */
1420 host->min_procs = cpv->v.shrt;
1421 break;
1422 case 10:/* max-procs */
1423 host->max_procs = cpv->v.shrt;
1424 break;
1425 case 11:/* max-load-per-proc */
1426 host->max_load_per_proc = cpv->v.shrt;
1427 break;
1428 case 12:/* idle-timeout */
1429 host->idle_timeout = cpv->v.shrt;
1430 break;
1431 case 13:/* disable-time */
1432 host->disable_time = cpv->v.shrt;
1433 break;
1434 case 14:/* bin-environment */
1435 host->bin_env = cpv->v.a;
1436 break;
1437 case 15:/* bin-copy-environment */
1438 host->bin_env_copy = cpv->v.a;
1439 break;
1440 case 16:/* broken-scriptfilename */
1441 host->break_scriptfilename_for_php = (0 != cpv->v.u);
1442 break;
1443 case 17:/* strip-request-uri */
1444 host->strip_request_uri = cpv->v.b;
1445 if (buffer_has_slash_suffix(host->strip_request_uri)) {
1446 buffer *b; /*(remove trailing slash; see http_cgi.c)*/
1447 *(const buffer **)&b = host->strip_request_uri;
1448 buffer_truncate(b, buffer_clen(b)-1);
1449 }
1450 break;
1451 case 18:/* fix-root-scriptname */
1452 host->fix_root_path_name = (0 != cpv->v.u);
1453 break;
1454 case 19:/* allow-x-send-file */
1455 host->xsendfile_allow = (0 != cpv->v.u);
1456 break;
1457 case 20:/* x-sendfile */
1458 host->xsendfile_allow = (0 != cpv->v.u);
1459 break;
1460 case 21:/* x-sendfile-docroot */
1461 host->xsendfile_docroot = cpv->v.a;
1462 if (cpv->v.a->used) {
1463 for (uint32_t k = 0; k < cpv->v.a->used; ++k) {
1464 data_string *ds = (data_string *)cpv->v.a->data[k];
1465 if (ds->type != TYPE_STRING) {
1466 log_error(srv->errh, __FILE__, __LINE__,
1467 "unexpected type for x-sendfile-docroot; "
1468 "expected: \"x-sendfile-docroot\" => "
1469 "( \"/allowed/path\", ... )");
1470 goto error;
1471 }
1472 if (ds->value.ptr[0] != '/') {
1473 log_error(srv->errh, __FILE__, __LINE__,
1474 "x-sendfile-docroot paths must begin with "
1475 "'/'; invalid: \"%s\"", ds->value.ptr);
1476 goto error;
1477 }
1478 buffer_path_simplify(&ds->value);
1479 buffer_append_slash(&ds->value);
1480 }
1481 }
1482 break;
1483 case 22:/* tcp-fin-propagate */
1484 host->tcp_fin_propagate = (0 != cpv->v.u);
1485 break;
1486 case 23:/* connect-timeout */
1487 host->connect_timeout = cpv->v.u;
1488 break;
1489 case 24:/* write-timeout */
1490 host->write_timeout = cpv->v.u;
1491 break;
1492 case 25:/* read-timeout */
1493 host->read_timeout = cpv->v.u;
1494 break;
1495 default:
1496 break;
1497 }
1498 }
1499
1500 for (uint32_t m = 0; m < da_host->value.used; ++m) {
1501 if (NULL != strchr(da_host->value.data[m]->key.ptr, '_')) {
1502 log_error(srv->errh, __FILE__, __LINE__,
1503 "incorrect directive contains underscore ('_') instead of dash ('-'): %s",
1504 da_host->value.data[m]->key.ptr);
1505 }
1506 }
1507
1508 if ((host->host || host->port) && host->unixsocket) {
1509 log_error(srv->errh, __FILE__, __LINE__,
1510 "either host/port or socket have to be set in: "
1511 "%s = (%s => (%s ( ...", cpkkey, da_ext->key.ptr,
1512 da_host->key.ptr);
1513
1514 goto error;
1515 }
1516
1517 if (host->host && *host->host->ptr == '/' && !host->unixsocket) {
1518 host->unixsocket = host->host;
1519 }
1520
1521 if (host->unixsocket) {
1522 /* unix domain socket */
1523 struct sockaddr_un un;
1524
1525 if (buffer_clen(host->unixsocket) + 1 > sizeof(un.sun_path) - 2) {
1526 log_error(srv->errh, __FILE__, __LINE__,
1527 "unixsocket is too long in: %s = (%s => (%s ( ...",
1528 cpkkey, da_ext->key.ptr, da_host->key.ptr);
1529
1530 goto error;
1531 }
1532
1533 if (host->bin_path) {
1534 gw_host *duplicate = unixsocket_is_dup(p, host->unixsocket);
1535 if (NULL != duplicate) {
1536 if (!buffer_is_equal(host->bin_path, duplicate->bin_path)) {
1537 log_error(srv->errh, __FILE__, __LINE__,
1538 "duplicate unixsocket path: %s",
1539 host->unixsocket->ptr);
1540 goto error;
1541 }
1542 gw_host_free(host);
1543 host = duplicate;
1544 ++host->refcount;
1545 }
1546 }
1547
1548 host->family = AF_UNIX;
1549 } else {
1550 /* tcp/ip */
1551
1552 if (!host->host && !host->bin_path) {
1553 log_error(srv->errh, __FILE__, __LINE__,
1554 "host or bin-path have to be set in: "
1555 "%s = (%s => (%s ( ...", cpkkey, da_ext->key.ptr,
1556 da_host->key.ptr);
1557
1558 goto error;
1559 } else if (0 == host->port) {
1560 host->port = 80;
1561 }
1562
1563 if (!host->host) {
1564 static const buffer lhost ={CONST_STR_LEN("127.0.0.1")+1,0};
1565 host->host = &lhost;
1566 }
1567
1568 host->family = (NULL != strchr(host->host->ptr, ':'))
1569 ? AF_INET6
1570 : AF_INET;
1571 }
1572 if (!host->refcount)
1573 gw_status_init_host(host);
1574
1575 if (host->refcount) {
1576 /* already init'd; skip spawning */
1577 } else if (host->bin_path) {
1578 /* a local socket + self spawning */
1579 struct stat st;
1580 parse_binpath(&host->args, host->bin_path);
1581 if (0 != stat(host->args.ptr[0], &st) || !S_ISREG(st.st_mode)
1582 || !(st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
1583 log_error(srv->errh, __FILE__, __LINE__,
1584 "invalid \"bin-path\" => \"%s\" "
1585 "(check that file exists, is regular file, "
1586 "and is executable by lighttpd)", host->bin_path->ptr);
1587 }
1588
1589 if (sh_exec) {
1590 /*(preserve prior behavior for SCGI exec of command)*/
1591 /*(admin should really prefer to put
1592 * any complex command into a script)*/
1593 for (uint32_t m = 0; m < host->args.used; ++m)
1594 free(host->args.ptr[m]);
1595 free(host->args.ptr);
1596
1597 host->args.ptr = ck_calloc(4, sizeof(char *));
1598 host->args.used = 3;
1599 host->args.ptr[0] = ck_malloc(sizeof("/bin/sh"));
1600 memcpy(host->args.ptr[0], "/bin/sh", sizeof("/bin/sh"));
1601 host->args.ptr[1] = ck_malloc(sizeof("-c"));
1602 memcpy(host->args.ptr[1], "-c", sizeof("-c"));
1603 host->args.ptr[2] = ck_malloc(sizeof("exec ")-1
1604 + buffer_clen(host->bin_path)+1);
1605 memcpy(host->args.ptr[2], "exec ", sizeof("exec ")-1);
1606 memcpy(host->args.ptr[2]+sizeof("exec ")-1,
1607 host->bin_path->ptr, buffer_clen(host->bin_path)+1);
1608 host->args.ptr[3] = NULL;
1609 }
1610
1611 if (host->min_procs > host->max_procs)
1612 host->min_procs = host->max_procs;
1613 if (host->min_procs!= host->max_procs
1614 && 0 != srv->srvconf.max_worker) {
1615 host->min_procs = host->max_procs;
1616 log_error(srv->errh, __FILE__, __LINE__,
1617 "adaptive backend spawning disabled "
1618 "(server.max_worker is non-zero)");
1619 }
1620 if (host->max_load_per_proc < 1)
1621 host->max_load_per_proc = 0;
1622
1623 if (s->debug) {
1624 log_error(srv->errh, __FILE__, __LINE__,
1625 "--- gw spawning local"
1626 "\n\tproc: %s"
1627 "\n\tport: %hu"
1628 "\n\tsocket %s"
1629 "\n\tmin-procs: %d"
1630 "\n\tmax-procs: %d",
1631 host->bin_path->ptr,
1632 host->port,
1633 host->unixsocket ? host->unixsocket->ptr : "",
1634 host->min_procs,
1635 host->max_procs);
1636 }
1637
1638 for (uint32_t pno = 0; pno < host->min_procs; ++pno) {
1639 gw_proc * const proc = gw_proc_init(host);
1640
1641 if (s->debug) {
1642 log_error(srv->errh, __FILE__, __LINE__,
1643 "--- gw spawning"
1644 "\n\tport: %hu"
1645 "\n\tsocket %s"
1646 "\n\tcurrent: %u / %u",
1647 host->port,
1648 host->unixsocket ? host->unixsocket->ptr : "",
1649 pno, host->max_procs);
1650 }
1651
1652 if (0 != gw_proc_sockaddr_init(host, proc, srv->errh)) {
1653 gw_proc_free(proc);
1654 goto error;
1655 }
1656
1657 if (!srv->srvconf.preflight_check
1658 && gw_spawn_connection(host, proc, srv->errh, s->debug)) {
1659 log_error(srv->errh, __FILE__, __LINE__,
1660 "[ERROR]: spawning gw failed.");
1661 gw_proc_free(proc);
1662 goto error;
1663 }
1664
1665 proc->next = host->first;
1666 if (host->first) host->first->prev = proc;
1667 host->first = proc;
1668 ++host->num_procs;
1669 }
1670
1671 if (graceful_restart_bg) {
1672 /*(set flag to false to avoid repeating)*/
1673 graceful_restart_bg = 0;
1674 log_error(srv->errh, __FILE__, __LINE__,
1675 "server.graceful-restart-bg disabled "
1676 "(incompatible with %s.server \"bin-path\")",
1677 p->self->name);
1678 data_unset * const du =
1679 array_get_data_unset(srv->srvconf.feature_flags,
1680 CONST_STR_LEN("server.graceful-restart-bg"));
1681 if (du->type == TYPE_STRING)
1682 buffer_copy_string_len(&((data_string *)du)->value,
1683 CONST_STR_LEN("false"));
1684 else /* (du->type == TYPE_INTEGER) */
1685 ((data_integer *)du)->value = 0;
1686 }
1687 } else {
1688 gw_proc * const proc = gw_proc_init(host);
1689 host->first = proc;
1690 ++host->num_procs;
1691 host->min_procs = 1;
1692 host->max_procs = 1;
1693 if (0 != gw_proc_sockaddr_init(host, proc, srv->errh)) goto error;
1694 gw_proc_set_state(host, proc, PROC_STATE_RUNNING);
1695 }
1696
1697 const buffer * const h = host->host ? host->host : host->unixsocket;
1698 host->gw_hash = gw_hash(BUF_PTR_LEN(h), DJBHASH_INIT);
1699
1700 /* s->exts is list of exts -> hosts
1701 * s->exts now used as combined list
1702 * of authorizer and responder hosts (for backend maintenance)
1703 * s->exts_auth is list of exts -> authorizer hosts
1704 * s->exts_resp is list of exts -> responder hosts
1705 * For each path/extension:
1706 * there may be an independent GW_AUTHORIZER and GW_RESPONDER
1707 * (The GW_AUTHORIZER and GW_RESPONDER could be handled by the same
1708 * host, and an admin might want to do that for large uploads,
1709 * since GW_AUTHORIZER runs prior to receiving (potentially large)
1710 * request body from client and can authorizer or deny request
1711 * prior to receiving the full upload)
1712 */
1713 gw_extension_insert(s->exts, &da_ext->key, host);
1714
1715 if (host_mode == GW_AUTHORIZER) {
1716 ++host->refcount;
1717 gw_extension_insert(s->exts_auth, &da_ext->key, host);
1718 } else if (host_mode == GW_RESPONDER) {
1719 ++host->refcount;
1720 gw_extension_insert(s->exts_resp, &da_ext->key, host);
1721 } /*(else should have been rejected above)*/
1722
1723 host = NULL;
1724 }
1725 }
1726
1727 return 1;
1728
1729 error:
1730 if (NULL != host) gw_host_free(host);
1731 return 0;
1732 }
1733
gw_get_defaults_balance(server * srv,const buffer * b)1734 int gw_get_defaults_balance(server *srv, const buffer *b) {
1735 if (!b || buffer_is_blank(b))
1736 return GW_BALANCE_LEAST_CONNECTION;
1737 if (buffer_eq_slen(b, CONST_STR_LEN("fair")))
1738 return GW_BALANCE_LEAST_CONNECTION;
1739 if (buffer_eq_slen(b, CONST_STR_LEN("least-connection")))
1740 return GW_BALANCE_LEAST_CONNECTION;
1741 if (buffer_eq_slen(b, CONST_STR_LEN("round-robin")))
1742 return GW_BALANCE_RR;
1743 if (buffer_eq_slen(b, CONST_STR_LEN("hash")))
1744 return GW_BALANCE_HASH;
1745 if (buffer_eq_slen(b, CONST_STR_LEN("sticky")))
1746 return GW_BALANCE_STICKY;
1747
1748 log_error(srv->errh, __FILE__, __LINE__,
1749 "xxxxx.balance has to be one of: "
1750 "least-connection, round-robin, hash, sticky, but not: %s", b->ptr);
1751 return GW_BALANCE_LEAST_CONNECTION;
1752 }
1753
1754
gw_set_state(gw_handler_ctx * hctx,gw_connection_state_t state)1755 static void gw_set_state(gw_handler_ctx *hctx, gw_connection_state_t state) {
1756 hctx->state = state;
1757 /*hctx->state_timestamp = log_monotonic_secs;*/
1758 }
1759
1760
gw_set_transparent(gw_handler_ctx * hctx)1761 void gw_set_transparent(gw_handler_ctx *hctx) {
1762 if (AF_UNIX != hctx->host->family) {
1763 if (-1 == fdevent_set_tcp_nodelay(hctx->fd, 1)) {
1764 /*(error, but not critical)*/
1765 }
1766 }
1767 hctx->wb_reqlen = -1;
1768 gw_set_state(hctx, GW_STATE_WRITE);
1769 }
1770
1771
/* Push hctx onto the head of the host's doubly-linked list of active
 * handler contexts. */
static void gw_host_hctx_enq(gw_handler_ctx * const hctx) {
    gw_host * const host = hctx->host;
    gw_handler_ctx * const head = host->hctxs;

    hctx->prev = NULL;
    hctx->next = head;
    if (NULL != head)
        head->prev = hctx;
    host->hctxs = hctx;
}
1782
1783
/* Unlink hctx from the host's doubly-linked list of active handler
 * contexts and clear its list pointers. */
static void gw_host_hctx_deq(gw_handler_ctx * const hctx) {
    gw_handler_ctx * const prev = hctx->prev;
    gw_handler_ctx * const next = hctx->next;

    if (NULL != prev)
        prev->next = next;
    else
        hctx->host->hctxs = next;  /* hctx was list head */

    if (NULL != next)
        next->prev = prev;

    hctx->prev = NULL;
    hctx->next = NULL;
}
1798
1799
/* Close the backend connection: deregister and schedule close of the
 * socket fd, release the proc's load slot, and detach from the host. */
static void gw_backend_close(gw_handler_ctx * const hctx, request_st * const r) {
    if (hctx->fd >= 0) {
        fdevent_fdnode_event_del(hctx->ev, hctx->fdn);
        /*fdevent_unregister(ev, hctx->fdn);*//*(handled below)*/
        fdevent_sched_close(hctx->ev, hctx->fdn);
        hctx->fdn = NULL;
        hctx->fd = -1;
        gw_host_hctx_deq(hctx);
    }

    gw_host * const host = hctx->host;
    if (NULL != host) {
        gw_proc * const proc = hctx->proc;
        if (NULL != proc) {
            hctx->proc = NULL;
            gw_proc_release(host, proc, hctx->conf.debug, r->conf.errh);
        }
        hctx->host = NULL;
        gw_host_reset(host);
    }
}
1821
/* Tear down the backend connection, free the handler context, and detach
 * it from the request; signal backend-done if this module is the handler. */
static void gw_connection_close(gw_handler_ctx * const hctx, request_st * const r) {
    gw_plugin_data * const p = hctx->plugin_data;

    gw_backend_close(hctx, r);
    handler_ctx_free(hctx);
    r->plugin_ctx[p->id] = NULL;

    if (r->handler_module == p->self)
        http_response_backend_done(r);
}
1833
gw_reconnect(gw_handler_ctx * const hctx,request_st * const r)1834 static handler_t gw_reconnect(gw_handler_ctx * const hctx, request_st * const r) {
1835 gw_backend_close(hctx, r);
1836
1837 hctx->host = gw_host_get(r,hctx->ext,hctx->conf.balance,hctx->conf.debug);
1838 if (NULL == hctx->host) return HANDLER_FINISHED;
1839
1840 gw_host_assign(hctx->host);
1841 hctx->request_id = 0;
1842 hctx->opts.xsendfile_allow = hctx->host->xsendfile_allow;
1843 hctx->opts.xsendfile_docroot = hctx->host->xsendfile_docroot;
1844 gw_set_state(hctx, GW_STATE_INIT);
1845 return HANDLER_COMEBACK;
1846 }
1847
1848
gw_handle_request_reset(request_st * const r,void * p_d)1849 handler_t gw_handle_request_reset(request_st * const r, void *p_d) {
1850 gw_plugin_data *p = p_d;
1851 gw_handler_ctx *hctx = r->plugin_ctx[p->id];
1852 if (hctx) gw_connection_close(hctx, r);
1853
1854 return HANDLER_GO_ON;
1855 }
1856
1857
1858 __attribute_cold__
gw_conditional_tcp_fin(gw_handler_ctx * const hctx,request_st * const r)1859 static void gw_conditional_tcp_fin(gw_handler_ctx * const hctx, request_st * const r) {
1860 /*assert(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_TCP_FIN);*/
1861 if (!chunkqueue_is_empty(&hctx->wb))return;
1862 if (!hctx->host->tcp_fin_propagate) return;
1863 if (hctx->gw_mode == GW_AUTHORIZER) return;
1864 if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BACKEND_SHUT_WR)
1865 return;
1866
1867 /* propagate shutdown SHUT_WR to backend if TCP half-close on con->fd */
1868 r->conf.stream_request_body |= FDEVENT_STREAM_REQUEST_BACKEND_SHUT_WR;
1869 r->conf.stream_request_body &= ~FDEVENT_STREAM_REQUEST_POLLIN;
1870 r->con->is_readable = 0;
1871 shutdown(hctx->fd, SHUT_WR);
1872 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
1873 }
1874
/* Refill hctx->wb (the queue written to the backend) from the request body
 * queue, applying backpressure limits so memory use stays bounded.
 * Returns HANDLER_GO_ON on success (or nothing to do), HANDLER_ERROR if
 * spilling data to tempfiles fails, or the stdin_append callback's result. */
static handler_t gw_write_refill_wb(gw_handler_ctx * const hctx, request_st * const r) {
    if (chunkqueue_is_empty(&r->reqbody_queue))
        return HANDLER_GO_ON;
    if (hctx->stdin_append) {
        /* module-specific callback encodes request body into wb; only
         * refill while wb is below the low-water mark (65536-16384) */
        if (chunkqueue_length(&hctx->wb) < 65536 - 16384)
            return hctx->stdin_append(hctx);
    }
    else {
        const chunk * const c = r->reqbody_queue.last;
        const off_t qlen = chunkqueue_length(&r->reqbody_queue);
        if (c->type == FILE_CHUNK) {
            /*(move all but last chunk if reqbody_queue using tempfiles, unless
             * hctx->wb is empty and only one chunk, then move last chunk)*/
            if (c != r->reqbody_queue.first)
                chunkqueue_steal(&hctx->wb, &r->reqbody_queue,
                                 qlen - (c->file.length-c->offset));
            else if (chunkqueue_is_empty(&hctx->wb))
                chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
        }
        else if (qlen + chunkqueue_length(&hctx->wb) > 65536) {
            /* combined backlog exceeds 64k of memory chunks; spill the
             * request body to tempfiles before queueing */
            if (0 != chunkqueue_steal_with_tempfiles(&hctx->wb,
                                                     &r->reqbody_queue, qlen, r->conf.errh))
                return HANDLER_ERROR;
        }
        else
            chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
    }
    return HANDLER_GO_ON;
}
1904
gw_write_request(gw_handler_ctx * const hctx,request_st * const r)1905 static handler_t gw_write_request(gw_handler_ctx * const hctx, request_st * const r) {
1906 switch(hctx->state) {
1907 case GW_STATE_INIT:
1908 /* do we have a running process for this host (max-procs) ? */
1909 hctx->proc = NULL;
1910
1911 for (gw_proc *proc = hctx->host->first; proc; proc = proc->next) {
1912 if (proc->state == PROC_STATE_RUNNING) {
1913 hctx->proc = proc;
1914 break;
1915 }
1916 }
1917
1918 /* all children are dead */
1919 if (hctx->proc == NULL) {
1920 return HANDLER_ERROR;
1921 }
1922
1923 /* check the other procs if they have a lower load */
1924 for (gw_proc *proc = hctx->proc->next; proc; proc = proc->next) {
1925 if (proc->state != PROC_STATE_RUNNING) continue;
1926 if (proc->load < hctx->proc->load) hctx->proc = proc;
1927 }
1928
1929 gw_proc_load_inc(hctx->host, hctx->proc);
1930
1931 hctx->fd = fdevent_socket_nb_cloexec(hctx->host->family,SOCK_STREAM,0);
1932 if (-1 == hctx->fd) {
1933 log_perror(r->conf.errh, __FILE__, __LINE__,
1934 "socket() failed (cur_fds:%d) (max_fds:%d)",
1935 r->con->srv->cur_fds, r->con->srv->max_fds);
1936 return HANDLER_ERROR;
1937 }
1938
1939 ++r->con->srv->cur_fds;
1940
1941 hctx->fdn = fdevent_register(hctx->ev,hctx->fd,gw_handle_fdevent,hctx);
1942
1943 if (hctx->proc->is_local) {
1944 hctx->pid = hctx->proc->pid;
1945 }
1946
1947 hctx->write_ts = log_monotonic_secs;
1948 gw_host_hctx_enq(hctx);
1949 switch (gw_establish_connection(r, hctx->host, hctx->proc, hctx->pid,
1950 hctx->fd, hctx->conf.debug)) {
1951 case 1: /* connection is in progress */
1952 fdevent_fdnode_event_set(hctx->ev, hctx->fdn, FDEVENT_OUT);
1953 gw_set_state(hctx, GW_STATE_CONNECT_DELAYED);
1954 return HANDLER_WAIT_FOR_EVENT;
1955 case -1:/* connection error */
1956 return HANDLER_ERROR;
1957 case 0: /* everything is ok, go on */
1958 hctx->reconnects = 0;
1959 break;
1960 }
1961 __attribute_fallthrough__
1962 case GW_STATE_CONNECT_DELAYED:
1963 if (hctx->state == GW_STATE_CONNECT_DELAYED) { /*(not GW_STATE_INIT)*/
1964 if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT))
1965 return HANDLER_WAIT_FOR_EVENT;
1966 int socket_error = fdevent_connect_status(hctx->fd);
1967 if (socket_error != 0) {
1968 gw_proc_connect_error(r, hctx->host, hctx->proc, hctx->pid,
1969 socket_error, hctx->conf.debug);
1970 return HANDLER_ERROR;
1971 }
1972 /* go on with preparing the request */
1973 hctx->write_ts = log_monotonic_secs;
1974 }
1975
1976 gw_proc_connect_success(hctx->host, hctx->proc, hctx->conf.debug, r);
1977
1978 gw_set_state(hctx, GW_STATE_PREPARE_WRITE);
1979 __attribute_fallthrough__
1980 case GW_STATE_PREPARE_WRITE:
1981 /* ok, we have the connection */
1982
1983 {
1984 handler_t rc = hctx->create_env(hctx);
1985 if (HANDLER_GO_ON != rc) {
1986 if (HANDLER_FINISHED != rc && HANDLER_ERROR != rc)
1987 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
1988 return rc;
1989 }
1990 }
1991
1992 /*(disable Nagle algorithm if streaming and content-length unknown)*/
1993 if (AF_UNIX != hctx->host->family) {
1994 if (r->reqbody_length < 0) {
1995 if (-1 == fdevent_set_tcp_nodelay(hctx->fd, 1)) {
1996 /*(error, but not critical)*/
1997 }
1998 }
1999 }
2000
2001 hctx->read_ts = log_monotonic_secs;
2002 fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_IN|FDEVENT_RDHUP);
2003 gw_set_state(hctx, GW_STATE_WRITE);
2004 __attribute_fallthrough__
2005 case GW_STATE_WRITE:
2006 if (!chunkqueue_is_empty(&hctx->wb)) {
2007 log_error_st * const errh = r->conf.errh;
2008 #if 0
2009 if (hctx->conf.debug > 1) {
2010 log_error(errh, __FILE__, __LINE__, "sdsx",
2011 "send data to backend (fd=%d), size=%zu",
2012 hctx->fd, chunkqueue_length(&hctx->wb));
2013 }
2014 #endif
2015 off_t bytes_out = hctx->wb.bytes_out;
2016 if (r->con->srv->network_backend_write(hctx->fd, &hctx->wb,
2017 MAX_WRITE_LIMIT, errh) < 0) {
2018 switch(errno) {
2019 case EPIPE:
2020 case ENOTCONN:
2021 case ECONNRESET:
2022 /* the connection got dropped after accept()
2023 * we don't care about that --
2024 * if you accept() it, you have to handle it.
2025 */
2026 log_error(errh, __FILE__, __LINE__,
2027 "connection was dropped after accept() "
2028 "(perhaps the gw process died), "
2029 "write-offset: %lld socket: %s",
2030 (long long)hctx->wb.bytes_out,
2031 hctx->proc->connection_name->ptr);
2032 return HANDLER_ERROR;
2033 default:
2034 log_perror(errh, __FILE__, __LINE__, "write failed");
2035 return HANDLER_ERROR;
2036 }
2037 }
2038 else if (hctx->wb.bytes_out > bytes_out) {
2039 hctx->write_ts = hctx->proc->last_used = log_monotonic_secs;
2040 handler_t rc = gw_write_refill_wb(hctx, r);
2041 if (HANDLER_GO_ON != rc) return rc;
2042 }
2043 }
2044
2045 if (hctx->wb.bytes_out == hctx->wb_reqlen) {
2046 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
2047 gw_set_state(hctx, GW_STATE_READ);
2048 } else {
2049 off_t wblen = chunkqueue_length(&hctx->wb);
2050 if ((hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0)
2051 && wblen < 65536 - 16384) {
2052 /*(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)*/
2053 if (!(r->conf.stream_request_body
2054 & FDEVENT_STREAM_REQUEST_POLLIN)) {
2055 r->conf.stream_request_body |=
2056 FDEVENT_STREAM_REQUEST_POLLIN;
2057 r->con->is_readable = 1; /* trigger optimistic client read */
2058 }
2059 }
2060 if (0 == wblen) {
2061 fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_OUT);
2062 }
2063 else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT)) {
2064 hctx->write_ts = log_monotonic_secs;
2065 fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_OUT);
2066 }
2067 }
2068
2069 if (r->conf.stream_request_body
2070 & FDEVENT_STREAM_REQUEST_TCP_FIN)
2071 gw_conditional_tcp_fin(hctx, r);
2072
2073 return HANDLER_WAIT_FOR_EVENT;
2074 case GW_STATE_READ:
2075 /* waiting for a response */
2076 return HANDLER_WAIT_FOR_EVENT;
2077 default:
2078 log_error(r->conf.errh, __FILE__, __LINE__,
2079 "(debug) unknown state");
2080 return HANDLER_ERROR;
2081 }
2082 }
2083
2084
2085 __attribute_cold__
2086 __attribute_noinline__
gw_backend_error(gw_handler_ctx * const hctx,request_st * const r)2087 static handler_t gw_backend_error(gw_handler_ctx * const hctx, request_st * const r)
2088 {
2089 if (hctx->backend_error) hctx->backend_error(hctx);
2090 http_response_backend_error(r);
2091 gw_connection_close(hctx, r);
2092 return HANDLER_FINISHED;
2093 }
2094
2095
2096 static handler_t gw_recv_response(gw_handler_ctx *hctx, request_st *r);
2097
2098
2099 __attribute_cold__
gw_write_error(gw_handler_ctx * const hctx,request_st * const r)2100 static handler_t gw_write_error(gw_handler_ctx * const hctx, request_st * const r) {
2101
2102 if (hctx->state == GW_STATE_INIT ||
2103 hctx->state == GW_STATE_CONNECT_DELAYED) {
2104
2105 /* (optimization to detect backend process exit while processing a
2106 * large number of ready events; (this block could be removed)) */
2107 if (hctx->proc && hctx->proc->is_local) {
2108 server * const srv = r->con->srv;
2109 if (0 == srv->srvconf.max_worker)
2110 gw_restart_dead_procs(hctx->host,srv->errh,hctx->conf.debug,0);
2111 }
2112
2113 /* cleanup this request and let request handler start request again */
2114 if (hctx->reconnects++ < 5) return gw_reconnect(hctx, r);
2115 }
2116 else {
2117 /* backend might not read request body (even though backend should)
2118 * before sending response, so it is possible to get EPIPE trying to
2119 * write request body to the backend when backend has already sent a
2120 * response. If called from gw_handle_fdevent(), response should have
2121 * been read prior to getting here. However, if reqbody arrived on
2122 * client side, and called gw_handle_subrequest() and we tried to write
2123 * in gw_send_request() in state GW_STATE_WRITE, then it is possible to
2124 * get EPIPE and error out here when response is waiting to be read from
2125 * kernel socket buffers. Since we did not actually receive FDEVENT_HUP
2126 * or FDEVENT_RDHUP, calling gw_handle_fdevent() and fabricating
2127 * FDEVENT_RDHUP would cause an infinite loop trying to read().
2128 * Instead, try once to read (small) response in this theoretical race*/
2129 handler_t rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
2130 if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
2131 }
2132
2133 /*(r->status == 400 if hctx->create_env() failed)*/
2134 if (!r->resp_body_started && r->http_status < 500 && r->http_status != 400)
2135 r->http_status = 503; /* Service Unavailable */
2136
2137 return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
2138 }
2139
gw_send_request(gw_handler_ctx * const hctx,request_st * const r)2140 static handler_t gw_send_request(gw_handler_ctx * const hctx, request_st * const r) {
2141 handler_t rc = gw_write_request(hctx, r);
2142 return (HANDLER_ERROR != rc) ? rc : gw_write_error(hctx, r);
2143 }
2144
2145
handler_t gw_handle_subrequest(request_st * const r, void *p_d) {
    /* Plugin subrequest hook: drives the backend request/response cycle for
     * a request owned by this gw-based module.  Processes fdevents recorded
     * by gw_handle_fdevent(), throttles streaming, pulls more request body
     * from the client, and (re)sends it to the backend.
     * Returns HANDLER_GO_ON if this plugin does not own the request. */
    gw_plugin_data *p = p_d;
    gw_handler_ctx *hctx = r->plugin_ctx[p->id];
    if (NULL == hctx) return HANDLER_GO_ON;

    /* replay socket events deferred from gw_handle_fdevent() */
    const int revents = hctx->revents;
    if (revents) {
        hctx->revents = 0;
        handler_t rc = gw_process_fdevent(hctx, r, revents);
        if (rc != HANDLER_GO_ON && rc != HANDLER_WAIT_FOR_EVENT)
            return rc; /*(might invalidate hctx)*/
    }

    /* when streaming response with BUFMIN, pause/resume reading from the
     * backend based on how much is already queued for the client */
    if ((r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
        && r->resp_body_started) {
        if (chunkqueue_length(&r->write_queue) > 65536 - 4096) {
            /* Note: if apps inheriting gw_handle use hctx->rb, then those apps
             * are responsible for limiting amount of data buffered in memory
             * in hctx->rb. Currently, mod_fastcgi is the only core app doing
             * so, and the maximum FCGI_Record size is 8 + 65535 + 255 = 65798
             * (FCGI_HEADER_LEN(8)+contentLength(65535)+paddingLength(255)) */
            fdevent_fdnode_event_clr(hctx->ev, hctx->fdn, FDEVENT_IN);
        }
        else if (!(fdevent_fdnode_interest(hctx->fdn) & FDEVENT_IN)) {
            /* optimistic read from backend */
            handler_t rc;
            rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
            if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
            hctx->read_ts = log_monotonic_secs;
            fdevent_fdnode_event_add(hctx->ev, hctx->fdn, FDEVENT_IN);
        }
    }

    /* (do not receive request body before GW_AUTHORIZER has run or else
     * the request body is discarded with handler_ctx_clear() after running
     * the FastCGI Authorizer) */

    /* more request body expected?  (wb_reqlen < 0 means length not yet known,
     * e.g. Transfer-Encoding: chunked still in progress) */
    if (hctx->gw_mode != GW_AUTHORIZER
        && (0 == hctx->wb.bytes_in
            ? (r->state == CON_STATE_READ_POST || -1 == hctx->wb_reqlen)
            : (hctx->wb.bytes_in < hctx->wb_reqlen || hctx->wb_reqlen < 0))) {
        /* leave excess data in r->reqbody_queue, which is
         * buffered to disk if too large and backend can not keep up */
        /*(64k - 4k to attempt to avoid temporary files
         * in conjunction with FDEVENT_STREAM_REQUEST_BUFMIN)*/
        if (chunkqueue_length(&hctx->wb) > 65536 - 4096
            && (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN)) {
            /* backend is behind; stop polling client for more request body */
            r->conf.stream_request_body &= ~FDEVENT_STREAM_REQUEST_POLLIN;
            return HANDLER_WAIT_FOR_EVENT;
        }
        else {
            handler_t rc = r->con->reqbody_read(r);

            if (hctx->opts.backend == BACKEND_PROXY) {
                if (hctx->state == GW_STATE_INIT /* ??? < GW_STATE_WRITE ??? */
                    && rc == HANDLER_WAIT_FOR_EVENT
                    /* streaming flags might not be set yet
                     * if hctx->create_env() not called yet */
                    && ((r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)
                        || r->h2_connect_ext))
                    rc = HANDLER_GO_ON;
                /* connect() to backend proxy w/o waiting for any request body*/
            }
            else if (-1 == r->reqbody_length) {
                /* XXX: create configurable flag */
                /* CGI environment requires that Content-Length be set.
                 * Send 411 Length Required if Content-Length missing.
                 * (occurs here if client sends Transfer-Encoding: chunked
                 * and module is flagged to stream request body to backend) */
                return (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST)
                  ? http_response_reqbody_read_error(r, 411)
                  : (rc == HANDLER_GO_ON) ? HANDLER_WAIT_FOR_EVENT : rc;
            }

            if (hctx->wb_reqlen < -1 && r->reqbody_length >= 0) {
                /* (completed receiving Transfer-Encoding: chunked) */
                hctx->wb_reqlen = -hctx->wb_reqlen;
                if (hctx->stdin_append) {
                    handler_t rca = hctx->stdin_append(hctx);
                    if (HANDLER_GO_ON != rca) return rca;
                }
                else
                    chunkqueue_append_chunkqueue(&hctx->wb, &r->reqbody_queue);
            }

            /* refill backend write queue from the received request body */
            if (0 != hctx->wb.bytes_in || -1 == hctx->wb_reqlen) {
                handler_t rca = gw_write_refill_wb(hctx, r);
                if (HANDLER_GO_ON != rca) return rca;
                if (fdevent_fdnode_interest(hctx->fdn) & FDEVENT_OUT) {
                    return (rc == HANDLER_GO_ON) ? HANDLER_WAIT_FOR_EVENT : rc;
                }
            }
            if (rc != HANDLER_GO_ON) return rc;
        }
    }

    /* attempt to send now unless waiting on a delayed connect() */
    {
        handler_t rc =((0==hctx->wb.bytes_in || !chunkqueue_is_empty(&hctx->wb))
                       && hctx->state != GW_STATE_CONNECT_DELAYED)
          ? gw_send_request(hctx, r)
          : HANDLER_WAIT_FOR_EVENT;
        if (HANDLER_WAIT_FOR_EVENT != rc) return rc;
    }

    /* propagate half-close (TCP FIN) to backend if so configured */
    if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_TCP_FIN)
        gw_conditional_tcp_fin(hctx, r);

    return HANDLER_WAIT_FOR_EVENT;
}
2255
2256
gw_authorizer_ok(gw_handler_ctx * const hctx,request_st * const r)2257 static handler_t gw_authorizer_ok(gw_handler_ctx * const hctx, request_st * const r) {
2258 /*
2259 * If we are here in AUTHORIZER mode then a request for authorizer
2260 * was processed already, and status 200 has been returned. We need
2261 * now to handle authorized request.
2262 */
2263 char *physpath = NULL;
2264
2265 gw_host * const host = hctx->host;
2266 if (host->docroot) {
2267 buffer_copy_buffer(&r->physical.doc_root, host->docroot);
2268 buffer_copy_buffer(&r->physical.basedir, host->docroot);
2269 buffer_copy_path_len2(&r->physical.path,
2270 BUF_PTR_LEN(host->docroot),
2271 BUF_PTR_LEN(&r->uri.path));
2272 physpath = r->physical.path.ptr;
2273 }
2274
2275 /*(restore streaming flags removed during authorizer processing)*/
2276 r->conf.stream_response_body |= (hctx->opts.authorizer >> 1);
2277
2278 gw_backend_close(hctx, r);
2279 handler_ctx_clear(hctx);
2280
2281 /* don't do more than 6 loops here; normally shouldn't happen */
2282 if (++r->loops_per_request > 5) {
2283 log_error(r->conf.errh, __FILE__, __LINE__,
2284 "too many loops while processing request: %s",
2285 r->target_orig.ptr);
2286 r->http_status = 500; /* Internal Server Error */
2287 r->handler_module = NULL;
2288 return HANDLER_FINISHED;
2289 }
2290
2291 /* restart the request so other handlers can process it */
2292
2293 if (physpath) r->physical.path.ptr = NULL;
2294 http_response_reset(r); /*(includes r->http_status=0)*/
2295 /* preserve r->physical.path.ptr with modified docroot */
2296 if (physpath) r->physical.path.ptr = physpath;
2297
2298 /*(FYI: if multiple FastCGI authorizers were to be supported,
2299 * next one could be started here instead of restarting request)*/
2300
2301 r->handler_module = NULL;
2302 return HANDLER_COMEBACK;
2303 }
2304
2305
2306 __attribute_cold__
2307 static handler_t gw_recv_response_error(gw_handler_ctx * const hctx, request_st * const r, gw_proc * const proc);
2308
2309
gw_recv_response(gw_handler_ctx * const hctx,request_st * const r)2310 static handler_t gw_recv_response(gw_handler_ctx * const hctx, request_st * const r) {
2311 /*(XXX: make this a configurable flag for other protocols)*/
2312 buffer *b = (hctx->opts.backend == BACKEND_FASTCGI
2313 || hctx->opts.backend == BACKEND_AJP13)
2314 ? chunk_buffer_acquire()
2315 : hctx->response;
2316 const off_t bytes_in = r->write_queue.bytes_in;
2317
2318 handler_t rc = http_response_read(r, &hctx->opts, b, hctx->fdn);
2319
2320 if (b != hctx->response) chunk_buffer_release(b);
2321
2322 gw_proc * const proc = hctx->proc;
2323
2324 switch (rc) {
2325 default:
2326 /* change in r->write_queue.bytes_in used to approximate backend read,
2327 * since bytes read from backend, if any, might be consumed from b by
2328 * hctx->opts->parse callback, hampering detection here. However, this
2329 * may not be triggered for partial collection of HTTP response headers
2330 * or partial packets for backend protocol (e.g. FastCGI) */
2331 if (r->write_queue.bytes_in > bytes_in)
2332 hctx->read_ts = proc->last_used = log_monotonic_secs;
2333 return HANDLER_GO_ON;
2334 case HANDLER_FINISHED:
2335 /*hctx->read_ts =*/ proc->last_used = log_monotonic_secs;
2336
2337 if (hctx->gw_mode == GW_AUTHORIZER
2338 && (200 == r->http_status || 0 == r->http_status))
2339 return gw_authorizer_ok(hctx, r);
2340
2341 gw_connection_close(hctx, r);
2342 return HANDLER_FINISHED;
2343 case HANDLER_COMEBACK: /*(not expected; treat as error)*/
2344 case HANDLER_ERROR:
2345 return gw_recv_response_error(hctx, r, proc);
2346 }
2347 }
2348
2349
__attribute_cold__
static handler_t gw_recv_response_error(gw_handler_ctx * const hctx, request_st * const r, gw_proc * const proc)
{
    /* Error while reading the backend response: optionally respawn a local
     * backend proc that exited, retry on another proc if nothing was sent to
     * the client yet, otherwise fail the request via gw_backend_error(). */

    /* (optimization to detect backend process exit while processing a
     * large number of ready events; (this block could be removed)) */
    if (proc->is_local && 1 == proc->load && proc->pid == hctx->pid
        && proc->state != PROC_STATE_DIED
        && 0 == r->con->srv->srvconf.max_worker) {
        /* intentionally check proc->disabled_until before gw_proc_waitpid */
        gw_host * const host = hctx->host;
        log_error_st * const errh = r->con->srv->errh;
        if (proc->disabled_until < log_monotonic_secs
            && 0 != gw_proc_waitpid(host, proc, errh)) {
            /* proc has exited; attempt to respawn it immediately */
            if (hctx->conf.debug) {
                log_error(errh, __FILE__, __LINE__,
                  "--- gw spawning\n\tsocket %s\n\tcurrent: 1/%d",
                  proc->connection_name->ptr, host->num_procs);
            }

            if (gw_spawn_connection(host, proc, errh, hctx->conf.debug)) {
                log_error(errh, __FILE__, __LINE__,
                  "respawning failed, will retry later");
            }
        }
    }

    if (r->resp_body_started == 0) {
        /* nothing has been sent out yet, try to use another child */

        if (hctx->wb.bytes_out == 0 && hctx->reconnects++ < 5) {
            log_error(r->conf.errh, __FILE__, __LINE__,
              "response not received, request not sent on "
              "socket: %s for %s?%.*s, reconnecting",
              proc->connection_name->ptr,
              r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));

            return gw_reconnect(hctx, r);
        }

        log_error(r->conf.errh, __FILE__, __LINE__,
          "response not received, request sent: %lld on "
          "socket: %s for %s?%.*s, closing connection",
          (long long)hctx->wb.bytes_out, proc->connection_name->ptr,
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));
    } else if (!light_btst(r->resp_htags, HTTP_HEADER_UPGRADE)
               && !r->h2_connect_ext) {
        /* (skip complaint for Upgrade / CONNECT, where the backend closing
         *  the connection mid-stream is a normal end of the tunnel) */
        log_error(r->conf.errh, __FILE__, __LINE__,
          "response already sent out, but backend returned error on "
          "socket: %s for %s?%.*s, terminating connection",
          proc->connection_name->ptr,
          r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query));
    }

    return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
}
2405
2406
gw_handle_fdevent(void * ctx,int revents)2407 static handler_t gw_handle_fdevent(void *ctx, int revents) {
2408 gw_handler_ctx *hctx = ctx;
2409 hctx->revents |= revents;
2410 joblist_append(hctx->con);
2411 return HANDLER_FINISHED;
2412 }
2413
static handler_t gw_process_fdevent(gw_handler_ctx * const hctx, request_st * const r, int revents) {
    /* Dispatch deferred socket events on the backend connection.
     * Order matters: read a pending response before reacting to HUP/RDHUP,
     * since the close may simply follow a complete response. */
    if (revents & FDEVENT_IN) {
        handler_t rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
        if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
    }

    if (revents & FDEVENT_OUT) {
        return gw_send_request(hctx, r); /*(might invalidate hctx)*/
    }

    /* perhaps this issue is already handled */
    if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) {
        if (hctx->state == GW_STATE_CONNECT_DELAYED) {
            /* connect() completed (or failed); gw_send_request() will pick up
             * the connection status via gw_write_request() */
            return gw_send_request(hctx, r); /*(might invalidate hctx)*/
        } else if (r->resp_body_started) {
            /* drain any remaining data from kernel pipe buffers
             * even if (r->conf.stream_response_body
             * & FDEVENT_STREAM_RESPONSE_BUFMIN)
             * since event loop will spin on fd FDEVENT_HUP event
             * until unregistered. */
            handler_t rc;
            /* temporarily disable BUFMIN throttling so the drain loop below
             * cannot stall; restore the flags afterward */
            const unsigned short flags = r->conf.stream_response_body;
            r->conf.stream_response_body &= ~FDEVENT_STREAM_RESPONSE_BUFMIN;
            r->conf.stream_response_body |= FDEVENT_STREAM_RESPONSE_POLLRDHUP;
            do {
                rc = gw_recv_response(hctx, r); /*(might invalidate hctx)*/
            } while (rc == HANDLER_GO_ON); /*(unless HANDLER_GO_ON)*/
            r->conf.stream_response_body = flags;
            return rc; /* HANDLER_FINISHED or HANDLER_ERROR */
        } else {
            /* unexpected close before any response was produced */
            gw_proc *proc = hctx->proc;
            log_error(r->conf.errh, __FILE__, __LINE__,
              "error: unexpected close of gw connection for %s?%.*s "
              "(no gw process on socket: %s ?) %d",
              r->uri.path.ptr, BUFFER_INTLEN_PTR(&r->uri.query),
              proc->connection_name->ptr, hctx->state);

            gw_connection_close(hctx, r);
            return HANDLER_FINISHED;
        }
    } else if (revents & FDEVENT_ERR) {
        log_error(r->conf.errh, __FILE__, __LINE__,
          "gw: got a FDEVENT_ERR. Don't know why.");
        return gw_backend_error(hctx, r); /* HANDLER_FINISHED */
    }

    return HANDLER_GO_ON;
}
2462
handler_t gw_check_extension(request_st * const r, gw_plugin_data * const p, int uri_path_handler, size_t hctx_sz) {
    /* Decide whether this gw-based module should handle the request:
     * match the URL prefix or filename extension against the configured
     * authorizer/responder extension lists, pick a backend host, optionally
     * split SCRIPT_NAME/PATH_INFO, and set up the handler context.
     * uri_path_handler: nonzero when called from the uri-handling hook
     * (match against r->uri.path) rather than the physical-path hook.
     * hctx_sz: size of the (possibly derived) handler context to allocate.
     * Returns HANDLER_GO_ON to decline or continue, HANDLER_FINISHED if no
     * backend host is available (error response already set). */
#if 0 /*(caller must handle)*/
    if (NULL != r->handler_module) return HANDLER_GO_ON;
    gw_patch_connection(r, p);
    if (NULL == p->conf.exts) return HANDLER_GO_ON;
#endif

    buffer *fn = uri_path_handler ? &r->uri.path : &r->physical.path;
    const size_t s_len = buffer_clen(fn);
    gw_extension *extension = NULL;
    gw_host *host = NULL;
    gw_handler_ctx *hctx;
    unsigned short gw_mode;

    if (0 == s_len) return HANDLER_GO_ON; /*(not expected)*/

    /* check p->conf.exts_auth list and then p->conf.ext_resp list
     * (skip p->conf.exts_auth if array is empty
     *  or if GW_AUTHORIZER already ran in this request) */
    hctx = r->plugin_ctx[p->id];
    /*(hctx not NULL if GW_AUTHORIZER ran; hctx->ext_auth check is redundant)*/
    gw_mode = (NULL == hctx || NULL == hctx->ext_auth)
      ? 0 /*GW_AUTHORIZER p->conf.exts_auth will be searched next*/
      : GW_AUTHORIZER; /*GW_RESPONDER p->conf.exts_resp will be searched next*/

    do {

        gw_exts *exts;
        if (0 == gw_mode) {
            gw_mode = GW_AUTHORIZER;
            exts = p->conf.exts_auth;
        } else {
            gw_mode = GW_RESPONDER;
            exts = p->conf.exts_resp;
        }

        if (0 == exts->used) continue;

        /* gw.map-extensions maps extensions to existing gw.server entries
         *
         * gw.map-extensions = ( ".php3" => ".php" )
         *
         * gw.server = ( ".php" => ... )
         *
         * */

        /* check if extension-mapping matches */
        if (p->conf.ext_mapping) {
            data_string *ds =
              (data_string *)array_match_key_suffix(p->conf.ext_mapping, fn);
            if (NULL != ds) {
                /* found a mapping */
                /* check if we know the extension */
                uint32_t k;
                for (k = 0; k < exts->used; ++k) {
                    extension = exts->exts+k;

                    if (buffer_is_equal(&ds->value, &extension->key)) {
                        break;
                    }
                }

                if (k == exts->used) {
                    /* found nothing */
                    extension = NULL;
                }
            }
        }

        if (extension == NULL) {
            size_t uri_path_len = buffer_clen(&r->uri.path);

            /* check if extension matches */
            for (uint32_t k = 0; k < exts->used; ++k) {
                gw_extension *ext = exts->exts+k;
              #ifdef __clang_analyzer__
                force_assert(ext); /*(unnecessary; quiet clang analyzer)*/
              #endif
                size_t ct_len = buffer_clen(&ext->key);

                /* check _url_ in the form "/gw_pattern" */
                if (ext->key.ptr[0] == '/') {
                    /* prefix match against the URL path */
                    if (ct_len <= uri_path_len
                        && 0 == memcmp(r->uri.path.ptr, ext->key.ptr, ct_len)){
                        extension = ext;
                        break;
                    }
                } else if (ct_len <= s_len
                           && 0 == memcmp(fn->ptr + s_len - ct_len,
                                          ext->key.ptr, ct_len)) {
                    /* check extension in the form ".fcg" */
                    extension = ext;
                    break;
                }
            }
        }

    } while (NULL == extension && gw_mode != GW_RESPONDER);

    /* extension doesn't match */
    if (NULL == extension) {
        return HANDLER_GO_ON;
    }

    /* check if we have at least one server for this extension up and running */
    host = gw_host_get(r, extension, p->conf.balance, p->conf.debug);
    if (NULL == host) {
        return HANDLER_FINISHED;
    }

    /* a note about no handler is not sent yet */
    extension->note_is_sent = 0;

    /*
     * if check-local is disabled, use the uri.path handler
     *
     */

    /* init handler-context */
    if (uri_path_handler) {
        if (host->check_local)
            return HANDLER_GO_ON;

        /* path info rewrite is done only for /prefix/? matches */
        /* do not split path info for authorizer */
        if (extension->key.ptr[0] == '/' && gw_mode != GW_AUTHORIZER) {
            /* the prefix is the SCRIPT_NAME,
             * everything from start to the next slash
             * this is important for check-local = "disable"
             *
             * if prefix = /admin.gw
             *
             * /admin.gw/foo/bar
             *
             * SCRIPT_NAME = /admin.gw
             * PATH_INFO   = /foo/bar
             *
             * if prefix = /cgi-bin/
             *
             * /cgi-bin/foo/bar
             *
             * SCRIPT_NAME = /cgi-bin/foo
             * PATH_INFO   = /bar
             *
             * if prefix = /, and fix-root-path-name is enable
             *
             * /cgi-bin/foo/bar
             *
             * SCRIPT_NAME = /cgi-bin/foo
             * PATH_INFO   = /bar
             *
             */
            /* (s_len is buffer_clen(&r->uri.path) if (uri_path_handler) */
            uint32_t elen = buffer_clen(&extension->key);
            const char *pathinfo;
            if (1 == elen && host->fix_root_path_name) {
                /* prefix "/": entire path becomes PATH_INFO */
                buffer_copy_buffer(&r->pathinfo, &r->uri.path);
                buffer_truncate(&r->uri.path, 0);
            }
            else if (s_len > elen
                     && (pathinfo = strchr(r->uri.path.ptr+elen, '/'))) {
                /* rewrite uri.path and pathinfo */
                const uint32_t plen = r->uri.path.ptr + s_len - pathinfo;
                buffer_copy_string_len(&r->pathinfo, pathinfo, plen);
                buffer_truncate(&r->uri.path, s_len - plen);
            }
        }
    }

    /* reuse hctx from a completed GW_AUTHORIZER pass, if present */
    if (!hctx) hctx = handler_ctx_init(hctx_sz);

    hctx->ev               = r->con->srv->ev;
    hctx->r                = r;
    hctx->con              = r->con;
    hctx->plugin_data      = p;
    hctx->host             = host;
    hctx->proc             = NULL;
    hctx->ext              = extension;
    gw_host_assign(host);

    hctx->gw_mode = gw_mode;
    if (gw_mode == GW_AUTHORIZER) {
        hctx->ext_auth = hctx->ext;
    }

    /*hctx->conf.exts        = p->conf.exts;*/
    /*hctx->conf.exts_auth   = p->conf.exts_auth;*/
    /*hctx->conf.exts_resp   = p->conf.exts_resp;*/
    /*hctx->conf.ext_mapping = p->conf.ext_mapping;*/
    hctx->conf.balance     = p->conf.balance;
    hctx->conf.proto       = p->conf.proto;
    hctx->conf.debug       = p->conf.debug;

    /* per-read limit depends on response streaming mode */
    hctx->opts.max_per_read =
      !(r->conf.stream_response_body /*(if not streaming response body)*/
        & (FDEVENT_STREAM_RESPONSE|FDEVENT_STREAM_RESPONSE_BUFMIN))
        ? 262144
        : (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
          ? 16384  /* FDEVENT_STREAM_RESPONSE_BUFMIN */
          : 65536; /* FDEVENT_STREAM_RESPONSE */
    hctx->opts.fdfmt = S_IFSOCK;
    hctx->opts.authorizer = (gw_mode == GW_AUTHORIZER);
    hctx->opts.local_redir = 0;
    hctx->opts.xsendfile_allow = host->xsendfile_allow;
    hctx->opts.xsendfile_docroot = host->xsendfile_docroot;

    r->plugin_ctx[p->id] = hctx;

    r->handler_module = p->self;

    if (r->conf.log_request_handling) {
        log_error(r->conf.errh, __FILE__, __LINE__,
          "handling the request using %s", p->self->name);
    }

    return HANDLER_GO_ON;
}
2680
__attribute_cold__
__attribute_noinline__
static void gw_handle_trigger_hctx_timeout(gw_handler_ctx * const hctx, const char * const msg) {
    /* Handle a timed-out backend connection.  msg is one of "connect",
     * "read", "write"; dispatch is on the first character ('c'/'w'). */

    request_st * const r = hctx->r;
    joblist_append(r->con); /* (schedule request to be rerun) */

    if (*msg == 'c') { /* "connect" */
        /* temporarily disable backend proc */
        gw_proc_connect_error(r, hctx->host, hctx->proc, hctx->pid,
                              ETIMEDOUT, hctx->conf.debug);
        /* cleanup this request and let request handler start request again */
        /* retry only once since request already waited write_timeout secs */
        if (hctx->reconnects++ < 1) {
            gw_reconnect(hctx, r);
            return;
        }
        r->http_status = 503; /* Service Unavailable */
    }
    else { /* "read" or "write" */
        /* blocked waiting to send (more) data to or to receive response
         * (neither are a definite indication that the proc is no longer
         *  responsive on other socket connections; not marking proc overloaded)
         * (If connect() to backend succeeded, then we began sending
         *  request and filled kernel socket buffers, so request is
         *  in progress and it is not safe or possible to retry) */
        /*if (hctx->conf.debug)*/
            log_error(r->conf.errh, __FILE__, __LINE__,
              "%s timeout on socket: %s (fd: %d)",
              msg, hctx->proc->connection_name->ptr, hctx->fd);

        if (*msg == 'w') { /* "write" */
            /* (503 set by gw_write_error() is upgraded to 504 below) */
            gw_write_error(hctx, r); /*(calls gw_backend_error())*/
            if (r->http_status == 503) r->http_status = 504; /*Gateway Timeout*/
            return;
        } /* else "read" */
    }
    gw_backend_error(hctx, r);
    /* (distinguish timeout from generic 500 set by gw_backend_error()) */
    if (r->http_status == 500 && !r->resp_body_started && !r->handler_module)
        r->http_status = 504; /*Gateway Timeout*/
}
2722
2723 __attribute_noinline__
gw_handle_trigger_host_timeouts(gw_host * const host)2724 static void gw_handle_trigger_host_timeouts(gw_host * const host) {
2725
2726 if (NULL == host->hctxs) return;
2727 const unix_time64_t rsecs = (unix_time64_t)host->read_timeout;
2728 const unix_time64_t wsecs = (unix_time64_t)host->write_timeout;
2729 const unix_time64_t csecs = (unix_time64_t)host->connect_timeout;
2730 if (!rsecs && !wsecs && !csecs)
2731 return; /*(no timeout policy (default))*/
2732
2733 const unix_time64_t mono = log_monotonic_secs; /*(could have callers pass)*/
2734 for (gw_handler_ctx *hctx = host->hctxs, *next; hctx; hctx = next) {
2735 /* if timeout occurs, hctx might be invalidated and removed from list,
2736 * so next element must be store before checking for timeout */
2737 next = hctx->next;
2738
2739 if (hctx->state == GW_STATE_CONNECT_DELAYED) {
2740 if (mono - hctx->write_ts > csecs && csecs) /*(waiting for write)*/
2741 gw_handle_trigger_hctx_timeout(hctx, "connect");
2742 continue; /*(do not apply wsecs below to GW_STATE_CONNECT_DELAYED)*/
2743 }
2744
2745 const int events = fdevent_fdnode_interest(hctx->fdn);
2746 if ((events & FDEVENT_IN) && mono - hctx->read_ts > rsecs && rsecs) {
2747 gw_handle_trigger_hctx_timeout(hctx, "read");
2748 continue;
2749 }
2750 if ((events & FDEVENT_OUT) && mono - hctx->write_ts > wsecs && wsecs) {
2751 gw_handle_trigger_hctx_timeout(hctx, "write");
2752 continue;
2753 }
2754 }
2755 }
2756
static void gw_handle_trigger_host(gw_host * const host, log_error_st * const errh, const int debug) {
    /* Periodic (per-second) maintenance for one backend host:
     * check connection timeouts, restart dead local procs, and -- when
     * adaptive spawning is configured (min_procs != max_procs with a
     * bin-path) -- spawn an extra proc under overload or reap one idle
     * proc past the idle timeout. */

    /* check for socket timeouts on active requests to backend host */
    gw_handle_trigger_host_timeouts(host);

    /* check each child proc to detect if proc exited */

    gw_proc *proc;
    unix_time64_t idle_timestamp;
    int overload = 1;

  #if 0 /* redundant w/ handle_waitpid hook since lighttpd 1.4.46 */
    for (proc = host->first; proc; proc = proc->next) {
        gw_proc_waitpid(host, proc, errh);
    }
  #endif

    gw_restart_dead_procs(host, errh, debug, 1);

    /* check if adaptive spawning enabled */
    if (host->min_procs == host->max_procs) return;
    if (!host->bin_path) return;

    /* overloaded only if every proc exceeds max_load_per_proc */
    for (proc = host->first; proc; proc = proc->next) {
        if (proc->load <= host->max_load_per_proc) {
            overload = 0;
            break;
        }
    }

    if (overload && host->num_procs && host->num_procs < host->max_procs) {
        /* overload, spawn new child */
        if (debug) {
            log_error(errh, __FILE__, __LINE__,
              "overload detected, spawning a new child");
        }

        gw_proc_spawn(host, errh, debug);
    }

    /* reap at most one idle proc per trigger, keeping at least min_procs */
    idle_timestamp = log_monotonic_secs - host->idle_timeout;
    for (proc = host->first; proc; proc = proc->next) {
        if (host->num_procs <= host->min_procs) break;
        if (0 != proc->load) continue;
        if (proc->pid <= 0) continue;
        if (proc->last_used >= idle_timestamp) continue;

        /* terminate proc that has been idling for a long time */
        if (debug) {
            log_error(errh, __FILE__, __LINE__,
              "idle-timeout reached, terminating child: socket: %s pid %d",
              proc->unixsocket ? proc->unixsocket->ptr : "", proc->pid);
        }

        gw_proc_kill(host, proc);

        /* proc is now in unused, let next second handle next process */
        break;
    }

  #if 0 /* redundant w/ handle_waitpid hook since lighttpd 1.4.46 */
    for (proc = host->unused_procs; proc; proc = proc->next) {
        gw_proc_waitpid(host, proc, errh);
    }
  #endif
}
2823
static void gw_handle_trigger_exts(gw_exts * const exts, log_error_st * const errh, const int debug) {
    /* run periodic host maintenance for every host of every extension */
    for (uint32_t i = 0; i < exts->used; ++i) {
        gw_extension * const ext = exts->exts+i;
        for (uint32_t h = 0; h < ext->used; ++h)
            gw_handle_trigger_host(ext->hosts[h], errh, debug);
    }
}
2832
static void gw_handle_trigger_exts_wkr(gw_exts *exts, log_error_st *errh) {
    /* worker-process variant of gw_handle_trigger_exts(): workers do not
     * spawn/reap backend procs (the parent does); they only check socket
     * timeouts and re-enable procs marked overloaded */
    for (uint32_t i = 0; i < exts->used; ++i) {
        gw_extension * const ext = exts->exts+i;
        for (uint32_t h = 0; h < ext->used; ++h) {
            gw_host * const host = ext->hosts[h];
            gw_handle_trigger_host_timeouts(host);
            for (gw_proc *proc = host->first; proc; proc = proc->next) {
                if (proc->state == PROC_STATE_OVERLOADED)
                    gw_proc_check_enable(host, proc, errh);
            }
        }
    }
}
2846
handler_t gw_handle_trigger(server *srv, void *p_d) {
    /* Server-wide periodic trigger hook: walk the plugin's per-condition
     * config list and run maintenance on every configured backend.
     * In worker processes (max_worker set and pid differs from the process
     * that loaded the config), only the reduced worker maintenance runs. */
    gw_plugin_data * const p = p_d;
    int wkr = (0 != srv->srvconf.max_worker && p->srv_pid != srv->pid);
    log_error_st * const errh = srv->errh;
    int global_debug = 0;

    if (NULL == p->cvlist) return HANDLER_GO_ON;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        int debug = global_debug;
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              case 2: /* xxxxx.debug */
                debug = (int)cpv->v.u;
                if (0 == i) global_debug = (int)cpv->v.u;
                /* (fall through into default: break) */
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        /* (debug flag is only active if set in same scope as xxxxx.server
         *  or global scope (for convenience))
         * (unable to use p->defaults.debug since gw_plugin_config
         *  might be part of a larger plugin_config) */
        wkr
          ? gw_handle_trigger_exts_wkr(conf->exts, errh)
          : gw_handle_trigger_exts(conf->exts, errh, debug);
    }

    return HANDLER_GO_ON;
}
2885
/* child-reap hook: called with a (pid, status) pair that has been collected
 * via waitpid.  Searches all configured hosts for a locally-spawned backend
 * proc with that pid; if found, logs the exit, marks the proc dead, and
 * (for active procs) attempts a restart unless the child exited too quickly.
 * Returns HANDLER_FINISHED once the pid is claimed by some proc, else
 * HANDLER_GO_ON so other plugins may inspect the pid. */
handler_t gw_handle_waitpid_cb(server *srv, void *p_d, pid_t pid, int status) {
    gw_plugin_data * const p = p_d;
    /* worker processes do not own backend procs; only the process that
     * spawned them (p->srv_pid) handles reaping */
    if (0 != srv->srvconf.max_worker && p->srv_pid != srv->pid)
        return HANDLER_GO_ON;
    log_error_st * const errh = srv->errh;
    int global_debug = 0;

    if (NULL == p->cvlist) return HANDLER_GO_ON;
    /* (init i to 0 if global context; to 1 to skip empty global context) */
    for (int i = !p->cvlist[0].v.u2[1], used = p->nconfig; i < used; ++i) {
        config_plugin_value_t *cpv = p->cvlist + p->cvlist[i].v.u2[0];
        gw_plugin_config *conf = NULL;
        int debug = global_debug;
        for (; -1 != cpv->k_id; ++cpv) {
            switch (cpv->k_id) {
              case 0: /* xxxxx.server */
                if (cpv->vtype == T_CONFIG_LOCAL) conf = cpv->v.v;
                break;
              case 2: /* xxxxx.debug */
                debug = (int)cpv->v.u;
                if (0 == i) global_debug = (int)cpv->v.u;
                /* fall through (no further action for this key) */
              default:
                break;
            }
        }

        if (NULL == conf || NULL == conf->exts) continue;

        /* (debug flag is only active if set in same scope as xxxxx.server
         * or global scope (for convenience))
         * (unable to use p->defaults.debug since gw_plugin_config
         * might be part of a larger plugin_config) */
        const unix_time64_t cur_ts = log_monotonic_secs;
        gw_exts *exts = conf->exts;
        for (uint32_t j = 0; j < exts->used; ++j) {
            gw_extension *ex = exts->exts+j;
            for (uint32_t n = 0; n < ex->used; ++n) {
                gw_host *host = ex->hosts[n];
                gw_proc *proc;
                /* first scan the active proc list: a dead active proc is
                 * a candidate for immediate restart */
                for (proc = host->first; proc; proc = proc->next) {
                    if (!proc->is_local || proc->pid != pid) continue;

                    gw_proc_waitpid_log(host, proc, errh, status);
                    gw_proc_set_state(host, proc, PROC_STATE_DIED);
                    proc->pid = 0; /* pid has been reaped; no longer valid */

                    /* restart, but avoid spinning if child exits too quickly */
                    if (proc->disabled_until < cur_ts) {
                        /*(set state PROC_STATE_DIED above, so != KILLED here)*/
                        /*(PROC_STATE_KILLED belongs in unused_procs, anyway)*/
                        if (proc->state != PROC_STATE_KILLED)
                            proc->disabled_until = cur_ts;
                        if (gw_spawn_connection(host, proc, errh, debug)) {
                            log_error(errh, __FILE__, __LINE__,
                              "ERROR: spawning gw failed.");
                        }
                    }

                    return HANDLER_FINISHED;
                }
                /* then scan unused procs: record the exit but do not
                 * restart (proc was already retired) */
                for (proc = host->unused_procs; proc; proc = proc->next) {
                    if (!proc->is_local || proc->pid != pid) continue;

                    gw_proc_waitpid_log(host, proc, errh, status);
                    if (proc->state != PROC_STATE_KILLED)
                        proc->disabled_until = cur_ts;
                    gw_proc_set_state(host, proc, PROC_STATE_DIED);
                    proc->pid = 0; /* pid has been reaped; no longer valid */
                    return HANDLER_FINISHED;
                }
            }
        }
    }

    return HANDLER_GO_ON;
}
2962