1 /*
2 * h2 - HTTP/2 protocol layer
3 *
4 * Copyright(c) 2020 Glenn Strauss gstrauss()gluelogic.com All rights reserved
5 * License: BSD 3-clause (same as lighttpd)
6 */
7 #include "first.h"
8 #include "h2.h"
9
10 #include <arpa/inet.h> /* htonl() */
11 #include <stdint.h> /* INT32_MAX INT32_MIN */
12 #include <stdlib.h>
13 #include <string.h>
14
15 #include "base.h"
16 #include "buffer.h"
17 #include "chunk.h"
18 #include "fdevent.h" /* FDEVENT_STREAM_REQUEST_BUFMIN */
19 #include "http_date.h"
20 #include "http_header.h"
21 #include "log.h"
22 #include "request.h"
23 #include "response.h" /* http_response_omit_header() */
24
25
26 /* lowercased field-names
27 * (32-byte record (power-2) and single block of memory for memory locality) */
28 static const char http_header_lc[][32] = {
29 [HTTP_HEADER_OTHER] = ""
30 ,[HTTP_HEADER_ACCEPT] = "accept"
31 ,[HTTP_HEADER_ACCEPT_ENCODING] = "accept-encoding"
32 ,[HTTP_HEADER_ACCEPT_LANGUAGE] = "accept-language"
33 ,[HTTP_HEADER_ACCEPT_RANGES] = "accept-ranges"
34 ,[HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN]="access-control-allow-origin"
35 ,[HTTP_HEADER_AGE] = "age"
36 ,[HTTP_HEADER_ALLOW] = "allow"
37 ,[HTTP_HEADER_ALT_SVC] = "alt-svc"
38 ,[HTTP_HEADER_ALT_USED] = "alt-used"
39 ,[HTTP_HEADER_AUTHORIZATION] = "authorization"
40 ,[HTTP_HEADER_CACHE_CONTROL] = "cache-control"
41 ,[HTTP_HEADER_CONNECTION] = "connection"
42 ,[HTTP_HEADER_CONTENT_ENCODING] = "content-encoding"
43 ,[HTTP_HEADER_CONTENT_LENGTH] = "content-length"
44 ,[HTTP_HEADER_CONTENT_LOCATION] = "content-location"
45 ,[HTTP_HEADER_CONTENT_RANGE] = "content-range"
46 ,[HTTP_HEADER_CONTENT_SECURITY_POLICY] = "content-security-policy"
47 ,[HTTP_HEADER_CONTENT_TYPE] = "content-type"
48 ,[HTTP_HEADER_COOKIE] = "cookie"
49 ,[HTTP_HEADER_DATE] = "date"
50 ,[HTTP_HEADER_DNT] = "dnt"
51 ,[HTTP_HEADER_ETAG] = "etag"
52 ,[HTTP_HEADER_EXPECT] = "expect"
53 ,[HTTP_HEADER_EXPECT_CT] = "expect-ct"
54 ,[HTTP_HEADER_EXPIRES] = "expires"
55 ,[HTTP_HEADER_FORWARDED] = "forwarded"
56 ,[HTTP_HEADER_HOST] = "host"
57 ,[HTTP_HEADER_HTTP2_SETTINGS] = "http2-settings"
58 ,[HTTP_HEADER_IF_MATCH] = "if-match"
59 ,[HTTP_HEADER_IF_MODIFIED_SINCE] = "if-modified-since"
60 ,[HTTP_HEADER_IF_NONE_MATCH] = "if-none-match"
61 ,[HTTP_HEADER_IF_RANGE] = "if-range"
62 ,[HTTP_HEADER_IF_UNMODIFIED_SINCE] = "if-unmodified-since"
63 ,[HTTP_HEADER_LAST_MODIFIED] = "last-modified"
64 ,[HTTP_HEADER_LINK] = "link"
65 ,[HTTP_HEADER_LOCATION] = "location"
66 ,[HTTP_HEADER_ONION_LOCATION] = "onion-location"
67 ,[HTTP_HEADER_P3P] = "p3p"
68 ,[HTTP_HEADER_PRAGMA] = "pragma"
69 ,[HTTP_HEADER_PRIORITY] = "priority"
70 ,[HTTP_HEADER_RANGE] = "range"
71 ,[HTTP_HEADER_REFERER] = "referer"
72 ,[HTTP_HEADER_REFERRER_POLICY] = "referrer-policy"
73 ,[HTTP_HEADER_SERVER] = "server"
74 ,[HTTP_HEADER_SET_COOKIE] = "set-cookie"
75 ,[HTTP_HEADER_STATUS] = "status"
76 ,[HTTP_HEADER_STRICT_TRANSPORT_SECURITY] = "strict-transport-security"
77 ,[HTTP_HEADER_TE] = "te"
78 ,[HTTP_HEADER_TRANSFER_ENCODING] = "transfer-encoding"
79 ,[HTTP_HEADER_UPGRADE] = "upgrade"
80 ,[HTTP_HEADER_UPGRADE_INSECURE_REQUESTS] = "upgrade-insecure-requests"
81 ,[HTTP_HEADER_USER_AGENT] = "user-agent"
82 ,[HTTP_HEADER_VARY] = "vary"
83 ,[HTTP_HEADER_WWW_AUTHENTICATE] = "www-authenticate"
84 ,[HTTP_HEADER_X_CONTENT_TYPE_OPTIONS] = "x-content-type-options"
85 ,[HTTP_HEADER_X_FORWARDED_FOR] = "x-forwarded-for"
86 ,[HTTP_HEADER_X_FORWARDED_PROTO] = "x-forwarded-proto"
87 ,[HTTP_HEADER_X_FRAME_OPTIONS] = "x-frame-options"
88 ,[HTTP_HEADER_X_XSS_PROTECTION] = "x-xss-protection"
89 };
90
91
92 /* future optimization: could conceivably store static XXH32() hash values for
93 * field-name (e.g. for benefit of entries marked LSHPACK_HDR_UNKNOWN) to
94 * incrementally reduce cost of calculating hash values for field-name on each
95 * request where those headers are used. Might also store single element
96 * static caches for "date:" value (updated each time static buffer is updated)
97 * and for "server:" value (often global to server), keyed on r->conf.server_tag
98 * pointer addr. HTTP_HEADER_STATUS could be overloaded for ":status", since
99 * lighttpd should not send "Status:" response header (should not happen) */
100
101 static const uint8_t http_header_lshpack_idx[] = {
102 [HTTP_HEADER_OTHER] = LSHPACK_HDR_UNKNOWN
103 ,[HTTP_HEADER_ACCEPT_ENCODING] = LSHPACK_HDR_ACCEPT_ENCODING
104 ,[HTTP_HEADER_AUTHORIZATION] = LSHPACK_HDR_AUTHORIZATION
105 ,[HTTP_HEADER_CACHE_CONTROL] = LSHPACK_HDR_CACHE_CONTROL
106 ,[HTTP_HEADER_CONNECTION] = LSHPACK_HDR_UNKNOWN
107 ,[HTTP_HEADER_CONTENT_ENCODING] = LSHPACK_HDR_CONTENT_ENCODING
108 ,[HTTP_HEADER_CONTENT_LENGTH] = LSHPACK_HDR_CONTENT_LENGTH
109 ,[HTTP_HEADER_CONTENT_LOCATION] = LSHPACK_HDR_CONTENT_LOCATION
110 ,[HTTP_HEADER_CONTENT_TYPE] = LSHPACK_HDR_CONTENT_TYPE
111 ,[HTTP_HEADER_COOKIE] = LSHPACK_HDR_COOKIE
112 ,[HTTP_HEADER_DATE] = LSHPACK_HDR_DATE
113 ,[HTTP_HEADER_ETAG] = LSHPACK_HDR_ETAG
114 ,[HTTP_HEADER_EXPECT] = LSHPACK_HDR_EXPECT
115 ,[HTTP_HEADER_FORWARDED] = LSHPACK_HDR_UNKNOWN
116 ,[HTTP_HEADER_HOST] = LSHPACK_HDR_HOST
117 ,[HTTP_HEADER_IF_MODIFIED_SINCE] = LSHPACK_HDR_IF_MODIFIED_SINCE
118 ,[HTTP_HEADER_IF_NONE_MATCH] = LSHPACK_HDR_IF_NONE_MATCH
119 ,[HTTP_HEADER_LAST_MODIFIED] = LSHPACK_HDR_LAST_MODIFIED
120 ,[HTTP_HEADER_LOCATION] = LSHPACK_HDR_LOCATION
121 ,[HTTP_HEADER_RANGE] = LSHPACK_HDR_RANGE
122 ,[HTTP_HEADER_SERVER] = LSHPACK_HDR_SERVER
123 ,[HTTP_HEADER_SET_COOKIE] = LSHPACK_HDR_SET_COOKIE
124 ,[HTTP_HEADER_STATUS] = LSHPACK_HDR_UNKNOWN
125 ,[HTTP_HEADER_TRANSFER_ENCODING] = LSHPACK_HDR_TRANSFER_ENCODING
126 ,[HTTP_HEADER_UPGRADE] = LSHPACK_HDR_UNKNOWN
127 ,[HTTP_HEADER_USER_AGENT] = LSHPACK_HDR_USER_AGENT
128 ,[HTTP_HEADER_VARY] = LSHPACK_HDR_VARY
129 ,[HTTP_HEADER_X_FORWARDED_FOR] = LSHPACK_HDR_UNKNOWN
130 ,[HTTP_HEADER_X_FORWARDED_PROTO] = LSHPACK_HDR_UNKNOWN
131 ,[HTTP_HEADER_HTTP2_SETTINGS] = LSHPACK_HDR_UNKNOWN
132 ,[HTTP_HEADER_ACCEPT] = LSHPACK_HDR_ACCEPT
133 ,[HTTP_HEADER_ACCEPT_LANGUAGE] = LSHPACK_HDR_ACCEPT_LANGUAGE
134 ,[HTTP_HEADER_ACCEPT_RANGES] = LSHPACK_HDR_ACCEPT_RANGES
135 ,[HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN]=LSHPACK_HDR_ACCESS_CONTROL_ALLOW_ORIGIN
136 ,[HTTP_HEADER_AGE] = LSHPACK_HDR_AGE
137 ,[HTTP_HEADER_ALLOW] = LSHPACK_HDR_ALLOW
138 ,[HTTP_HEADER_ALT_SVC] = LSHPACK_HDR_UNKNOWN
139 ,[HTTP_HEADER_ALT_USED] = LSHPACK_HDR_UNKNOWN
140 ,[HTTP_HEADER_CONTENT_RANGE] = LSHPACK_HDR_CONTENT_RANGE
141 ,[HTTP_HEADER_CONTENT_SECURITY_POLICY] = LSHPACK_HDR_UNKNOWN
142 ,[HTTP_HEADER_DNT] = LSHPACK_HDR_UNKNOWN
143 ,[HTTP_HEADER_EXPECT_CT] = LSHPACK_HDR_UNKNOWN
144 ,[HTTP_HEADER_EXPIRES] = LSHPACK_HDR_EXPIRES
145 ,[HTTP_HEADER_IF_MATCH] = LSHPACK_HDR_IF_MATCH
146 ,[HTTP_HEADER_IF_RANGE] = LSHPACK_HDR_IF_RANGE
147 ,[HTTP_HEADER_IF_UNMODIFIED_SINCE] = LSHPACK_HDR_IF_UNMODIFIED_SINCE
148 ,[HTTP_HEADER_LINK] = LSHPACK_HDR_LINK
149 ,[HTTP_HEADER_ONION_LOCATION] = LSHPACK_HDR_UNKNOWN
150 ,[HTTP_HEADER_P3P] = LSHPACK_HDR_UNKNOWN
151 ,[HTTP_HEADER_PRAGMA] = LSHPACK_HDR_UNKNOWN
152 ,[HTTP_HEADER_PRIORITY] = LSHPACK_HDR_UNKNOWN
153 ,[HTTP_HEADER_REFERER] = LSHPACK_HDR_REFERER
154 ,[HTTP_HEADER_REFERRER_POLICY] = LSHPACK_HDR_UNKNOWN
155 ,[HTTP_HEADER_STRICT_TRANSPORT_SECURITY] = LSHPACK_HDR_STRICT_TRANSPORT_SECURITY
156 ,[HTTP_HEADER_TE] = LSHPACK_HDR_UNKNOWN
157 ,[HTTP_HEADER_UPGRADE_INSECURE_REQUESTS] = LSHPACK_HDR_UNKNOWN
158 ,[HTTP_HEADER_WWW_AUTHENTICATE] = LSHPACK_HDR_WWW_AUTHENTICATE
159 ,[HTTP_HEADER_X_CONTENT_TYPE_OPTIONS] = LSHPACK_HDR_UNKNOWN
160 ,[HTTP_HEADER_X_FRAME_OPTIONS] = LSHPACK_HDR_UNKNOWN
161 ,[HTTP_HEADER_X_XSS_PROTECTION] = LSHPACK_HDR_UNKNOWN
162 };
163
164
165 /* Note: must be kept in sync with ls-hpack/lshpack.h:lshpack_static_hdr_idx[]*/
166 static const int8_t lshpack_idx_http_header[] = {
167 [LSHPACK_HDR_UNKNOWN] = HTTP_HEADER_H2_UNKNOWN
168 ,[LSHPACK_HDR_AUTHORITY] = HTTP_HEADER_H2_AUTHORITY
169 ,[LSHPACK_HDR_METHOD_GET] = HTTP_HEADER_H2_METHOD_GET
170 ,[LSHPACK_HDR_METHOD_POST] = HTTP_HEADER_H2_METHOD_POST
171 ,[LSHPACK_HDR_PATH] = HTTP_HEADER_H2_PATH
172 ,[LSHPACK_HDR_PATH_INDEX_HTML] = HTTP_HEADER_H2_PATH_INDEX_HTML
173 ,[LSHPACK_HDR_SCHEME_HTTP] = HTTP_HEADER_H2_SCHEME_HTTP
174 ,[LSHPACK_HDR_SCHEME_HTTPS] = HTTP_HEADER_H2_SCHEME_HTTPS
175 ,[LSHPACK_HDR_STATUS_200] = HTTP_HEADER_H2_UNKNOWN
176 ,[LSHPACK_HDR_STATUS_204] = HTTP_HEADER_H2_UNKNOWN
177 ,[LSHPACK_HDR_STATUS_206] = HTTP_HEADER_H2_UNKNOWN
178 ,[LSHPACK_HDR_STATUS_304] = HTTP_HEADER_H2_UNKNOWN
179 ,[LSHPACK_HDR_STATUS_400] = HTTP_HEADER_H2_UNKNOWN
180 ,[LSHPACK_HDR_STATUS_404] = HTTP_HEADER_H2_UNKNOWN
181 ,[LSHPACK_HDR_STATUS_500] = HTTP_HEADER_H2_UNKNOWN
182 ,[LSHPACK_HDR_ACCEPT_CHARSET] = HTTP_HEADER_OTHER
183 ,[LSHPACK_HDR_ACCEPT_ENCODING] = HTTP_HEADER_ACCEPT_ENCODING
184 ,[LSHPACK_HDR_ACCEPT_LANGUAGE] = HTTP_HEADER_ACCEPT_LANGUAGE
185 ,[LSHPACK_HDR_ACCEPT_RANGES] = HTTP_HEADER_ACCEPT_RANGES
186 ,[LSHPACK_HDR_ACCEPT] = HTTP_HEADER_ACCEPT
187 ,[LSHPACK_HDR_ACCESS_CONTROL_ALLOW_ORIGIN]=HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN
188 ,[LSHPACK_HDR_AGE] = HTTP_HEADER_AGE
189 ,[LSHPACK_HDR_ALLOW] = HTTP_HEADER_ALLOW
190 ,[LSHPACK_HDR_AUTHORIZATION] = HTTP_HEADER_AUTHORIZATION
191 ,[LSHPACK_HDR_CACHE_CONTROL] = HTTP_HEADER_CACHE_CONTROL
192 ,[LSHPACK_HDR_CONTENT_DISPOSITION] = HTTP_HEADER_OTHER
193 ,[LSHPACK_HDR_CONTENT_ENCODING] = HTTP_HEADER_CONTENT_ENCODING
194 ,[LSHPACK_HDR_CONTENT_LANGUAGE] = HTTP_HEADER_OTHER
195 ,[LSHPACK_HDR_CONTENT_LENGTH] = HTTP_HEADER_CONTENT_LENGTH
196 ,[LSHPACK_HDR_CONTENT_LOCATION] = HTTP_HEADER_CONTENT_LOCATION
197 ,[LSHPACK_HDR_CONTENT_RANGE] = HTTP_HEADER_CONTENT_RANGE
198 ,[LSHPACK_HDR_CONTENT_TYPE] = HTTP_HEADER_CONTENT_TYPE
199 ,[LSHPACK_HDR_COOKIE] = HTTP_HEADER_COOKIE
200 ,[LSHPACK_HDR_DATE] = HTTP_HEADER_DATE
201 ,[LSHPACK_HDR_ETAG] = HTTP_HEADER_ETAG
202 ,[LSHPACK_HDR_EXPECT] = HTTP_HEADER_EXPECT
203 ,[LSHPACK_HDR_EXPIRES] = HTTP_HEADER_EXPIRES
204 ,[LSHPACK_HDR_FROM] = HTTP_HEADER_OTHER
205 ,[LSHPACK_HDR_HOST] = HTTP_HEADER_HOST
206 ,[LSHPACK_HDR_IF_MATCH] = HTTP_HEADER_IF_MATCH
207 ,[LSHPACK_HDR_IF_MODIFIED_SINCE] = HTTP_HEADER_IF_MODIFIED_SINCE
208 ,[LSHPACK_HDR_IF_NONE_MATCH] = HTTP_HEADER_IF_NONE_MATCH
209 ,[LSHPACK_HDR_IF_RANGE] = HTTP_HEADER_IF_RANGE
210 ,[LSHPACK_HDR_IF_UNMODIFIED_SINCE] = HTTP_HEADER_IF_UNMODIFIED_SINCE
211 ,[LSHPACK_HDR_LAST_MODIFIED] = HTTP_HEADER_LAST_MODIFIED
212 ,[LSHPACK_HDR_LINK] = HTTP_HEADER_LINK
213 ,[LSHPACK_HDR_LOCATION] = HTTP_HEADER_LOCATION
214 ,[LSHPACK_HDR_MAX_FORWARDS] = HTTP_HEADER_OTHER
215 ,[LSHPACK_HDR_PROXY_AUTHENTICATE] = HTTP_HEADER_OTHER
216 ,[LSHPACK_HDR_PROXY_AUTHORIZATION] = HTTP_HEADER_OTHER
217 ,[LSHPACK_HDR_RANGE] = HTTP_HEADER_RANGE
218 ,[LSHPACK_HDR_REFERER] = HTTP_HEADER_REFERER
219 ,[LSHPACK_HDR_REFRESH] = HTTP_HEADER_OTHER
220 ,[LSHPACK_HDR_RETRY_AFTER] = HTTP_HEADER_OTHER
221 ,[LSHPACK_HDR_SERVER] = HTTP_HEADER_SERVER
222 ,[LSHPACK_HDR_SET_COOKIE] = HTTP_HEADER_SET_COOKIE
223 ,[LSHPACK_HDR_STRICT_TRANSPORT_SECURITY] = HTTP_HEADER_STRICT_TRANSPORT_SECURITY
224 ,[LSHPACK_HDR_TRANSFER_ENCODING] = HTTP_HEADER_TRANSFER_ENCODING
225 ,[LSHPACK_HDR_USER_AGENT] = HTTP_HEADER_USER_AGENT
226 ,[LSHPACK_HDR_VARY] = HTTP_HEADER_VARY
227 ,[LSHPACK_HDR_VIA] = HTTP_HEADER_OTHER
228 ,[LSHPACK_HDR_WWW_AUTHENTICATE] = HTTP_HEADER_WWW_AUTHENTICATE
229 };
230
231
232 static request_st * h2_init_stream (request_st * const h2r, connection * const con);
233
234
235 __attribute_pure__
236 static inline uint32_t
h2_u32(const uint8_t * const s)237 h2_u32 (const uint8_t * const s)
238 {
239 return ((uint32_t)s[0] << 24)
240 | ((uint32_t)s[1] << 16)
241 | ((uint32_t)s[2] << 8)
242 | (uint32_t)s[3];
243 }
244
245
246 __attribute_pure__
247 static inline uint32_t
h2_u31(const uint8_t * const s)248 h2_u31 (const uint8_t * const s)
249 {
250 return h2_u32(s) & ~0x80000000u;
251 }
252
253
254 __attribute_pure__
255 static inline uint32_t
h2_u24(const uint8_t * const s)256 h2_u24 (const uint8_t * const s)
257 {
258 #if 1
259 /* XXX: optimization is valid only for how this is used in h2.c
260 * where we have checked that frame header received is at least
261 * 9 chars, and where s containing frame length (3-bytes) is
262 * followed by at least 1 additional char. */
263 return h2_u32(s) >> 8;
264 #else
265 return ((uint32_t)s[0] << 16)
266 | ((uint32_t)s[1] << 8)
267 | (uint32_t)s[2];
268 #endif
269 }
270
271
272 __attribute_pure__
273 static inline uint16_t
h2_u16(const uint8_t * const s)274 h2_u16 (const uint8_t * const s)
275 {
276 return ((uint16_t)s[0] << 8)
277 | (uint16_t)s[1];
278 }
279
280
281 static void
h2_send_settings_ack(connection * const con)282 h2_send_settings_ack (connection * const con)
283 {
284 static const uint8_t settings_ack[] = {
285 /* SETTINGS w/ ACK */
286 0x00, 0x00, 0x00 /* frame length */
287 ,H2_FTYPE_SETTINGS /* frame type */
288 ,H2_FLAG_ACK /* frame flags */
289 ,0x00, 0x00, 0x00, 0x00 /* stream identifier */
290 };
291
292 chunkqueue_append_mem(con->write_queue,
293 (const char *)settings_ack, sizeof(settings_ack));
294 }
295
296
297 __attribute_cold__
298 static void
h2_send_rst_stream_id(uint32_t h2id,connection * const con,const request_h2error_t e)299 h2_send_rst_stream_id (uint32_t h2id, connection * const con, const request_h2error_t e)
300 {
301 union {
302 uint8_t c[16];
303 uint32_t u[4]; /*(alignment)*/
304 } rst_stream = { { /*(big-endian numbers)*/
305 0x00, 0x00, 0x00 /* padding for alignment; do not send */
306 /* RST_STREAM */
307 ,0x00, 0x00, 0x04 /* frame length */
308 ,H2_FTYPE_RST_STREAM /* frame type */
309 ,0x00 /* frame flags */
310 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
311 ,0x00, 0x00, 0x00, 0x00 /* error code; (fill in below) */
312 } };
313
314 rst_stream.u[2] = htonl(h2id);
315 rst_stream.u[3] = htonl(e);
316 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align padding)*/
317 (const char *)rst_stream.c+3, sizeof(rst_stream)-3);
318 }
319
320
321 __attribute_cold__
322 static void
h2_send_rst_stream_state(request_st * const r,h2con * const h2c)323 h2_send_rst_stream_state (request_st * const r, h2con * const h2c)
324 {
325 if (r->h2state != H2_STATE_HALF_CLOSED_REMOTE
326 && r->h2state != H2_STATE_CLOSED) {
327 /* set timestamp for comparison; not tracking individual stream ids */
328 h2c->half_closed_ts = log_monotonic_secs;
329 }
330 r->state = CON_STATE_ERROR;
331 r->h2state = H2_STATE_CLOSED;
332 }
333
334
335 __attribute_cold__
336 static void
h2_send_rst_stream(request_st * const r,connection * const con,const request_h2error_t e)337 h2_send_rst_stream (request_st * const r, connection * const con, const request_h2error_t e)
338 {
339 h2_send_rst_stream_state(r, con->h2);/*(sets r->h2state = H2_STATE_CLOSED)*/
340 h2_send_rst_stream_id(r->h2id, con, e);
341 }
342
343
344 __attribute_cold__
345 static void
h2_send_goaway_rst_stream(connection * const con)346 h2_send_goaway_rst_stream (connection * const con)
347 {
348 h2con * const h2c = con->h2;
349 const int sent_goaway = h2c->sent_goaway;
350 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
351 request_st * const r = h2c->r[i];
352 if (r->h2state == H2_STATE_CLOSED) continue;
353 h2_send_rst_stream_state(r, h2c);/*(sets r->h2state = H2_STATE_CLOSED)*/
354 /*(XXX: might consider always sending RST_STREAM)*/
355 if (sent_goaway)
356 h2_send_rst_stream_id(r->h2id, con, H2_E_PROTOCOL_ERROR);
357 }
358 }
359
360
361 void
h2_send_goaway(connection * const con,const request_h2error_t e)362 h2_send_goaway (connection * const con, const request_h2error_t e)
363 {
364 /* future: RFC 7540 Section 6.8 notes that server initiating graceful
365 * connection shutdown SHOULD send GOAWAY with stream id 2^31-1 and a
366 * NO_ERROR code, and later send another GOAWAY with an updated last
367 * stream identifier. (This is not done here, but doing so would be
368 * friendlier to clients that send streaming requests which the client
369 * is unable to retry.) */
370
371 if (e != H2_E_NO_ERROR)
372 h2_send_goaway_rst_stream(con);
373 /*XXX: else should send RST_STREAM w/ CANCEL for any active PUSH_PROMISE */
374
375 h2con * const h2c = con->h2;
376 if (h2c->sent_goaway && (h2c->sent_goaway > 0 || e == H2_E_NO_ERROR))
377 return;
378 h2c->sent_goaway = (e == H2_E_NO_ERROR) ? -1 : (int32_t)e;
379
380 union {
381 uint8_t c[20];
382 uint32_t u[5]; /*(alignment)*/
383 } goaway = { { /*(big-endian numbers)*/
384 0x00, 0x00, 0x00 /* padding for alignment; do not send */
385 /* GOAWAY */
386 ,0x00, 0x00, 0x08 /* frame length */
387 ,H2_FTYPE_GOAWAY /* frame type */
388 ,0x00 /* frame flags */
389 ,0x00, 0x00, 0x00, 0x00 /* stream identifier */
390 ,0x00, 0x00, 0x00, 0x00 /* last-stream-id (fill in below) */
391 ,0x00, 0x00, 0x00, 0x00 /* error code (fill in below) */
392 /* additional debug data (*); (optional)
393 * adjust frame length if any additional
394 * debug data is sent */
395 } };
396
397 goaway.u[3] = htonl(h2c->h2_cid); /* last-stream-id */
398 goaway.u[4] = htonl(e);
399 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align padding)*/
400 (const char *)goaway.c+3, sizeof(goaway)-3);
401 }
402
403
404 __attribute_cold__
405 static void
h2_send_goaway_e(connection * const con,const request_h2error_t e)406 h2_send_goaway_e (connection * const con, const request_h2error_t e)
407 {
408 h2_send_goaway(con, e);
409 }
410
411
412 __attribute_cold__
413 static int
h2_send_refused_stream(uint32_t h2id,connection * const con)414 h2_send_refused_stream (uint32_t h2id, connection * const con)
415 {
416 h2con * const h2c = con->h2;
417
418 if (h2c->sent_settings) { /*(see h2_recv_settings() comments)*/
419 /* client connected and immediately sent flurry of request streams
420 * (h2c->sent_settings is non-zero if sent SETTINGS frame to
421 * client and have not yet received SETTINGS ACK from client)
422 * lighttpd sends SETTINGS_MAX_CONCURRENT_STREAMS <limit> with
423 * server Connection Preface, so a well-behaved client will
424 * adjust after it sends its initial requests.
425 * (e.g. h2load -n 100 -m 100 sends 100 requests upon connect)
426 *
427 * Check if active streams have pending request body. If all active
428 * streams have pending request body, then must refuse new stream as
429 * progress might be blocked if active streams all wait for DATA. */
430 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
431 const request_st * const r = h2c->r[i];
432 if (r->reqbody_length == r->reqbody_queue.bytes_in) {
433 /* no pending request body; at least this request may proceed,
434 * though others waiting for request body may block until new
435 * request streams become active if new request streams precede
436 * DATA frames for active streams
437 *
438 * alternative to sending refused stream:
439 * stop processing frames and defer processing this HEADERS
440 * frame until num active streams drops below limit. */
441 return -1;
442 }
443 }
444 /* overload h2c->half_closed_ts to discard DATA (in h2_recv_data())
445 * from refused streams while waiting for SETTINGS ackn from client
446 * (instead of additional h2 con init time check in h2_recv_data())
447 * (though h2c->half_closed_ts is not unset when SETTINGS ackn received)
448 * (fuzzy discard; imprecise; see further comments in h2_recv_data()) */
449 h2c->half_closed_ts = h2c->sent_settings;
450 }
451
452 /* too many active streams; refuse new stream */
453 h2c->h2_cid = h2id;
454 h2_send_rst_stream_id(h2id, con, H2_E_REFUSED_STREAM);
455 return 1;
456 }
457
458
459 static int
h2_recv_goaway(connection * const con,const uint8_t * const s,uint32_t len)460 h2_recv_goaway (connection * const con, const uint8_t * const s, uint32_t len)
461 {
462 /*(s must be entire GOAWAY frame and len the frame length field)*/
463 /*assert(s[3] == H2_FTYPE_GOAWAY);*/
464 if (len < 8) { /*(GOAWAY frame length must be >= 8)*/
465 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
466 return 0;
467 }
468 if (0 != h2_u31(s+5)) { /*(GOAWAY stream id must be 0)*/
469 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
470 return 0;
471 }
472 const uint32_t e = h2_u32(s+13);
473 #if 0
474 /* XXX: debug: could log error code sent by peer */
475 #endif
476 #if 0
477 /* XXX: debug: could log additional debug info (if any) sent by peer */
478 if (len > 8) {
479 }
480 #endif
481 #if 0
482 /* XXX: could validate/use Last-Stream-ID sent by peer */
483 const uint32_t last_id = h2_u31(s+9);
484 #endif
485
486 /* send PROTOCOL_ERROR back to peer if peer sent an error code
487 * (i.e. not NO_ERROR) in order to terminate connection more quickly */
488 h2_send_goaway(con, e==H2_E_NO_ERROR ? H2_E_NO_ERROR : H2_E_PROTOCOL_ERROR);
489 h2con * const h2c = con->h2;
490 if (0 == h2c->rused) return 0;
491 return 1;
492 }
493
494
495 static void
h2_recv_rst_stream(connection * const con,const uint8_t * const s,const uint32_t len)496 h2_recv_rst_stream (connection * const con, const uint8_t * const s, const uint32_t len)
497 {
498 /*(s must be entire RST_STREAM frame and len the frame length field)*/
499 /*assert(s[3] == H2_FTYPE_RST_STREAM);*/
500 if (4 != len) { /*(RST_STREAM frame length must be 4)*/
501 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
502 return;
503 }
504 const uint32_t id = h2_u31(s+5);
505 if (0 == id) { /*(RST_STREAM id must not be 0)*/
506 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
507 return;
508 }
509 h2con * const h2c = con->h2;
510 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
511 request_st * const r = h2c->r[i];
512 if (r->h2id != id) continue;
513 if (r->h2state == H2_STATE_IDLE) {
514 /*(RST_STREAM must not be for stream in "idle" state)*/
515 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
516 return;
517 }
518 /* XXX: ? add debug trace including error code from RST_STREAM ? */
519 r->state = CON_STATE_ERROR;
520 r->h2state = H2_STATE_CLOSED;
521 return;
522 }
523 /* unknown/inactive stream id
524 * XXX: how should we handle RST_STREAM for unknown/inactive stream id?
525 * (stream id may have been closed recently and server forgot about it,
526 * but client (peer) sent RST_STREAM prior to receiving stream end from
527 * server)*/
528 #if 0
529 if (h2c->sent_goaway && h2c->h2_cid < id) return;
530 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
531 #else
532 if (h2c->h2_cid < id) {
533 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
534 return;
535 }
536 #endif
537 }
538
539
540 static void
h2_recv_ping(connection * const con,uint8_t * const s,const uint32_t len)541 h2_recv_ping (connection * const con, uint8_t * const s, const uint32_t len)
542 {
543 #if 0
544 union {
545 uint8_t c[20];
546 uint32_t u[5]; /*(alignment)*/
547 } ping = { { /*(big-endian numbers)*/
548 0x00, 0x00, 0x00 /* padding for alignment; do not send */
549 /* PING */
550 ,0x00, 0x00, 0x08 /* frame length */
551 ,H2_FTYPE_PING /* frame type */
552 ,H2_FLAG_ACK /* frame flags */
553 ,0x00, 0x00, 0x00, 0x00 /* stream identifier */
554 ,0x00, 0x00, 0x00, 0x00 /* opaque (fill in below) */
555 ,0x00, 0x00, 0x00, 0x00
556 } };
557 #endif
558
559 /*(s must be entire PING frame and len the frame length field)*/
560 /*assert(s[3] == H2_FTYPE_PING);*/
561 if (8 != len) { /*(PING frame length must be 8)*/
562 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
563 return;
564 }
565 s[5] &= ~0x80; /* reserved bit must be ignored */
566 if (0 != h2_u31(s+5)) { /*(PING stream id must be 0)*/
567 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
568 return;
569 }
570 if (s[4] & H2_FLAG_ACK) /*(ignore; unexpected if we did not send PING)*/
571 return;
572 /* reflect PING back to peer with frame flag ACK */
573 /* (9 byte frame header plus 8 byte PING payload = 17 bytes)*/
574 s[4] = H2_FLAG_ACK;
575 chunkqueue_append_mem(con->write_queue, (const char *)s, 17);
576 }
577
578
579 static void
h2_apply_priority_update(h2con * const h2c,const request_st * const r,const uint32_t rpos)580 h2_apply_priority_update (h2con * const h2c, const request_st * const r, const uint32_t rpos)
581 {
582 const request_st ** const rr = (const request_st **)h2c->r;
583 uint32_t npos = rpos;
584 while (npos
585 && (rr[npos-1]->h2_prio > r->h2_prio
586 || (rr[npos-1]->h2_prio == r->h2_prio
587 && rr[npos-1]->h2id > r->h2id)))
588 --npos;
589 if (rpos - npos) {
590 memmove(rr+npos+1, rr+npos, (rpos - npos)*sizeof(request_st *));
591 }
592 else {
593 while (npos+1 < h2c->rused
594 && (rr[npos+1]->h2_prio < r->h2_prio
595 || (rr[npos+1]->h2_prio == r->h2_prio
596 && rr[npos+1]->h2id < r->h2id)))
597 ++npos;
598 if (npos - rpos == 0)
599 return; /*(no movement)*/
600 memmove(rr+rpos, rr+rpos+1, (npos - rpos)*sizeof(request_st *));
601 }
602 rr[npos] = r;
603 }
604
605
606 __attribute_noinline__
__attribute_nonnull__()607 __attribute_nonnull__()
608 __attribute_pure__
609 static uint8_t
610 h2_parse_priority_update (const char * const prio, const uint32_t len)
611 {
612 /* parse priority string (structured field values: dictionary)
613 * (resets urgency (u) and incremental (i) to defaults if len == 0)
614 * (upon parse error, cease parsing and use defaults for remaining items) */
615 int urg = 3, incr = 0;
616 for (uint32_t i = 0; i < len; ++i) {
617 if (prio[i] == ' ' || prio[i] == '\t' || prio[i] == ',') continue;
618 if (prio[i] == 'u') { /* value: 0 - 7 */
619 if (i+2 < len && prio[i+1] == '=') {
620 if ((uint32_t)(prio[i+2] - '0') < 8)
621 urg = prio[i+2] - '0';
622 else
623 break; /* cease parsing if invalid syntax */
624 i += 2;
625 }
626 else
627 break; /* cease parsing if invalid syntax */
628 }
629 if (prio[i] == 'i') { /* value: 0 or 1 (boolean) */
630 if (i+3 < len && prio[i+1] == '=' && prio[i+2] == '?') {
631 if ((uint32_t)(prio[i+3] - '0') <= 1) /* 0 or 1 */
632 incr = prio[i+3] - '0';
633 else
634 break; /* cease parsing if invalid syntax */
635 i += 3;
636 }
637 else if (i+1 == len
638 || prio[i+1]==' ' || prio[i+1]=='\t' || prio[i+1]==',')
639 incr = 1;
640 else
641 break; /* cease parsing if invalid syntax */
642 }
643 do { ++i; } while (i < len && prio[i] != ','); /*advance to next token*/
644 }
645 /* combine priority 'urgency' value and invert 'incremental' boolean
646 * for easy (ascending) sorting by urgency and then incremental before
647 * non-incremental */
648 return (uint8_t)(urg << 1 | !incr);
649 }
650
651
652 static void
h2_recv_priority_update(connection * const con,const uint8_t * const s,const uint32_t len)653 h2_recv_priority_update (connection * const con, const uint8_t * const s, const uint32_t len)
654 {
655 /*(s must be entire PRIORITY_UPDATE frame and len the frame length field)*/
656 /*assert(s[3] == H2_FTYPE_PRIORITY_UPDATE);*/
657 if (len < 4) { /*(PRIORITY_UPDATE frame len must be >=4)*/
658 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
659 return;
660 }
661 const uint32_t id = h2_u31(s+5);
662 if (0 != id) { /*(PRIORITY_UPDATE id must be 0)*/
663 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
664 return;
665 }
666 const uint32_t prid = h2_u31(s+9);
667 if (0 == prid) { /*(prioritized stream id must not be 0)*/
668 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
669 return;
670 }
671 h2con * const h2c = con->h2;
672 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
673 request_st * const r = h2c->r[i];
674 if (r->h2id != prid) continue;
675 uint8_t prio = h2_parse_priority_update((char *)s+13, len-4);
676 if (r->h2_prio != prio) {
677 r->h2_prio = prio;
678 h2_apply_priority_update(h2c, r, i);
679 }
680 return;
681 }
682 #if 0
683 /*(note: not checking if prid applies to PUSH_PROMISE ids; unused in h2.c)*/
684 if (h2c->sent_goaway)
685 return;
686 if (h2c->h2_cid < prid) {
687 /* TODO: parse out urgency and incremental values,
688 * and then save for prid of future stream
689 * (see h2_recv_headers() for where to check and apply)
690 * (ignore for now; probably more worthwhile to do in HTTP/3;
691 * in HTTP/2, client might sent PRIORITY_UPDATE before HEADERS,
692 * but that is not handled here, and is not expected since the
693 * Priority request header can be used instead.) */
694 return;
695 }
696 #endif
697 /*(choosing to ignore frames for unmatched prid)*/
698 }
699
700
701 __attribute_cold__
702 __attribute_noinline__
703 static void
h2_recv_priority(connection * const con,const uint8_t * const s,const uint32_t len)704 h2_recv_priority (connection * const con, const uint8_t * const s, const uint32_t len)
705 {
706 /*(s must be entire PRIORITY frame and len the frame length field)*/
707 /*assert(s[3] == H2_FTYPE_PRIORITY);*/
708 if (5 != len) { /*(PRIORITY frame length must be 5)*/
709 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
710 return;
711 }
712 const uint32_t id = h2_u31(s+5);
713 if (0 == id) { /*(PRIORITY id must not be 0)*/
714 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
715 return;
716 }
717 const uint32_t prio = h2_u31(s+9);
718 #if 0
719 uint32_t exclusive_dependency = (s[9] & 0x80) ? 1 : 0;
720 /*(ignore dependency prid and exclusive_dependency,
721 * and attempt to scale PRIORITY weight (weight+1 default is 16)
722 * to PRIORITY_UPDATE (default urgency 3) (see h2_init_stream()))*/
723 uint8_t weight = s[13] >> 2;
724 weight = ((weight < 8 ? weight : 7) << 1) | !0;
725 #endif
726 h2con * const h2c = con->h2;
727 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
728 request_st * const r = h2c->r[i];
729 if (r->h2id != id) continue;
730 /* XXX: TODO: update priority info */
731 if (prio == id) {
732 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
733 return;
734 }
735 #if 0
736 else if (r->h2_prio != weight) {
737 r->h2_prio = weight;
738 h2_apply_priority_update(h2c, r, i);
739 }
740 #endif
741 return;
742 }
743 /* XXX: TODO: update priority info for unknown/inactive stream */
744 /*if (h2c->sent_goaway && h2c->h2_cid < id) return;*/
745 if (prio == id) {
746 h2_send_rst_stream_id(id, con, H2_E_PROTOCOL_ERROR);
747 return;
748 }
749 }
750
751
752 static void
h2_recv_window_update(connection * const con,const uint8_t * const s,const uint32_t len)753 h2_recv_window_update (connection * const con, const uint8_t * const s, const uint32_t len)
754 {
755 /*(s must be entire WINDOW_UPDATE frame and len the frame length field)*/
756 /*assert(s[3] == H2_FTYPE_WINDOW_UPDATE);*/
757 if (4 != len) { /*(WINDOW_UPDATE frame length must be 4)*/
758 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
759 return;
760 }
761 const uint32_t id = h2_u31(s+5);
762 const int32_t v = (int32_t)h2_u31(s+9);
763 request_st *r = NULL;
764 if (0 == id)
765 r = &con->request;
766 else {
767 h2con * const h2c = con->h2;
768 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
769 request_st * const rr = h2c->r[i];
770 if (rr->h2id != id) continue;
771 r = rr;
772 break;
773 }
774 /* peer should not send WINDOW_UPDATE for an inactive stream,
775 * but RFC 7540 does not explicitly call this out. On the other hand,
776 * since there may be a temporary mismatch in stream state between
777 * peers, ignore window update if stream id is unknown/inactive.
778 * Also, it is not an error if GOAWAY sent and h2c->h2_cid < id */
779 if (NULL == r) {
780 if (h2c->h2_cid < id && 0 == h2c->sent_goaway)
781 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
782 #if 0
783 /*(needed for h2spec if testing with response < 16k+1 over TLS
784 * or response <= socket send buffer size over cleartext, due to
785 * completing response too quickly for the test frame sequence) */
786 if (v == 0) /* h2spec: 6.9-2 (after we retired id 1) */
787 h2_send_rst_stream_id(id, con, H2_E_PROTOCOL_ERROR);
788 if (v == INT32_MAX)/* h2spec: 6.9.1-3 (after we retired id 1) */
789 h2_send_rst_stream_id(id, con, H2_E_FLOW_CONTROL_ERROR);
790 #endif
791 return;
792 }
793 /* MUST NOT be treated as error if stream is in closed state; ignore */
794 if (r->h2state == H2_STATE_CLOSED
795 || r->h2state == H2_STATE_HALF_CLOSED_LOCAL) return;
796 }
797 if (0 == v || r->h2_swin > INT32_MAX - v) {
798 request_h2error_t e = (0 == v)
799 ? H2_E_PROTOCOL_ERROR
800 : H2_E_FLOW_CONTROL_ERROR;
801 if (0 == id)
802 h2_send_goaway_e(con, e);
803 else
804 h2_send_rst_stream(r, con, e);
805 return;
806 }
807 r->h2_swin += v;
808 }
809
810
811 static void
h2_send_window_update(connection * const con,uint32_t h2id,const uint32_t len)812 h2_send_window_update (connection * const con, uint32_t h2id, const uint32_t len)
813 {
814 if (0 == len) return;
815 union {
816 uint8_t c[16];
817 uint32_t u[4]; /*(alignment)*/
818 } window_upd = { { /*(big-endian numbers)*/
819 0x00, 0x00, 0x00 /* padding for alignment; do not send */
820 /* WINDOW_UPDATE */
821 ,0x00, 0x00, 0x04 /* frame length */
822 ,H2_FTYPE_WINDOW_UPDATE /* frame type */
823 ,0x00 /* frame flags */
824 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
825 ,0x00, 0x00, 0x00, 0x00 /* window update increase (fill in below) */
826 } };
827
828 window_upd.u[2] = htonl(h2id);
829 window_upd.u[3] = htonl(len);
830 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align padding)*/
831 (const char *)window_upd.c+3, sizeof(window_upd)-3);
832 }
833
834
835 __attribute_noinline__
836 static void
h2_send_window_update_unit(connection * const con,request_st * const r,const uint32_t len)837 h2_send_window_update_unit (connection * const con, request_st * const r, const uint32_t len)
838 {
839 r->h2_rwin_fudge -= (int16_t)len;
840 if (r->h2_rwin_fudge < 0) {
841 r->h2_rwin_fudge += 16384;
842 h2_send_window_update(con, r->h2id, 16384); /*(r->h2_rwin)*/
843 }
844 }
845
846
847 static void
h2_parse_frame_settings(connection * const con,const uint8_t * s,uint32_t len)848 h2_parse_frame_settings (connection * const con, const uint8_t *s, uint32_t len)
849 {
850 /*(s and len must be SETTINGS frame payload)*/
851 /*(caller must validate frame len, frame type == 0x04, frame id == 0)*/
852 h2con * const h2c = con->h2;
853 for (; len >= 6; len -= 6, s += 6) {
854 uint32_t v = h2_u32(s+2);
855 switch (h2_u16(s)) {
856 case H2_SETTINGS_HEADER_TABLE_SIZE:
857 /* encoder may use any table size <= value sent by peer */
858 /* For simple compliance with RFC and constrained memory use,
859 * choose to not increase table size beyond the default 4096,
860 * but allow smaller sizes to be set and then reset up to 4096,
861 * e.g. set to 0 to evict all dynamic table entries,
862 * and then set to 4096 to restore dynamic table use */
863 if (v > 4096) v = 4096;
864 if (v == h2c->s_header_table_size) break;
865 h2c->s_header_table_size = v;
866 lshpack_enc_set_max_capacity(&h2c->encoder, v);
867 break;
868 case H2_SETTINGS_ENABLE_PUSH:
869 if ((v|1) != 1) { /*(v == 0 || v == 1)*/
870 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
871 return;
872 }
873 h2c->s_enable_push = v;
874 break;
875 case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
876 h2c->s_max_concurrent_streams = v;
877 break;
878 case H2_SETTINGS_INITIAL_WINDOW_SIZE:
879 if (v > INT32_MAX) { /*(2^31 - 1)*/
880 h2_send_goaway_e(con, H2_E_FLOW_CONTROL_ERROR);
881 return;
882 }
883 else if (h2c->rused) { /*(update existing streams)*/
884 /*(underflow is ok; unsigned integer math)*/
885 /*(h2c->s_initial_window_size is >= 0)*/
886 int32_t diff =
887 (int32_t)((uint32_t)v - (uint32_t)h2c->s_initial_window_size);
888 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
889 request_st * const r = h2c->r[i];
890 const int32_t swin = r->h2_swin;
891 if (r->h2state == H2_STATE_HALF_CLOSED_LOCAL
892 || r->h2state == H2_STATE_CLOSED) continue;
893 if (diff >= 0
894 ? swin > INT32_MAX - diff
895 : swin < INT32_MIN - diff) {
896 h2_send_rst_stream(r, con, H2_E_FLOW_CONTROL_ERROR);
897 continue;
898 }
899 r->h2_swin += diff;
900 }
901 }
902 h2c->s_initial_window_size = (int32_t)v;
903 break;
904 case H2_SETTINGS_MAX_FRAME_SIZE:
905 if (v < 16384 || v > 16777215) { /*[(2^14),(2^24-1)]*/
906 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
907 return;
908 }
909 h2c->s_max_frame_size = v;
910 break;
911 case H2_SETTINGS_MAX_HEADER_LIST_SIZE:
912 h2c->s_max_header_list_size = v;
913 break;
914 default:
915 break;
916 }
917 }
918
919 if (len) {
920 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
921 return;
922 }
923
924 /* caller must send SETTINGS frame with ACK flag,
925 * if appropriate, and if h2c->sent_goaway is not set
926 * (Do not send ACK for Upgrade: h2c and HTTP2-Settings header) */
927 }
928
929
930 static void
h2_recv_settings(connection * const con,const uint8_t * const s,const uint32_t len)931 h2_recv_settings (connection * const con, const uint8_t * const s, const uint32_t len)
932 {
933 /*(s must be entire SETTINGS frame, len must be the frame length field)*/
934 /*assert(s[3] == H2_FTYPE_SETTINGS);*/
935 if (0 != h2_u31(s+5)) {/*(SETTINGS stream id must be 0)*/
936 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
937 return;
938 }
939
940 h2con * const h2c = con->h2;
941 if (!(s[4] & H2_FLAG_ACK)) {
942 h2_parse_frame_settings(con, s+9, len);
943 if (h2c->sent_goaway <= 0)
944 h2_send_settings_ack(con);
945 }
946 else {
947 /* lighttpd currently sends SETTINGS in server preface, and not again,
948 * so this does not have to handle another SETTINGS frame being sent
949 * before receiving an ACK from prior SETTINGS frame. (If it does,
950 * then we will need some sort of counter.) */
951 if (0 != len)
952 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
953 else if (h2c->sent_settings)
954 h2c->sent_settings = 0;
955 else /* SETTINGS with ACK for SETTINGS frame we did not send */
956 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
957 }
958 }
959
960
961 static int
h2_recv_end_data(request_st * const r,connection * const con,const uint32_t alen)962 h2_recv_end_data (request_st * const r, connection * const con, const uint32_t alen)
963 {
964 chunkqueue * const reqbody_queue = &r->reqbody_queue;
965 r->h2state = (r->h2state == H2_STATE_OPEN)
966 ? H2_STATE_HALF_CLOSED_REMOTE
967 : H2_STATE_CLOSED;
968 if (r->reqbody_length == -1)
969 r->reqbody_length = reqbody_queue->bytes_in + (off_t)alen;
970 else if (r->reqbody_length != reqbody_queue->bytes_in + (off_t)alen) {
971 if (0 == reqbody_queue->bytes_out) {
972 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
973 return 0;
974 } /* else let reqbody streaming consumer handle truncated reqbody */
975 }
976
977 return 1;
978 }
979
980
981 static int
h2_recv_data(connection * const con,const uint8_t * const s,const uint32_t len)982 h2_recv_data (connection * const con, const uint8_t * const s, const uint32_t len)
983 {
984 /*(s must be entire DATA frame, len must be the frame length field)*/
985 /*assert(s[3] == H2_FTYPE_DATA);*/
986
987 /* future: consider string refs rather than copying DATA from chunkqueue
988 * or try to consume entire chunk, or to split chunks with less copying */
989
990 h2con * const h2c = con->h2;
991 const uint32_t id = h2_u31(s+5);
992 if (0 == id || h2c->h2_cid < id) { /*(RST_STREAM id must not be 0)*/
993 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
994 return 0;
995 }
996
997 uint32_t alen = len; /* actual data len, minus padding */
998 uint32_t pad = 0;
999 if (s[4] & H2_FLAG_PADDED) {
1000 pad = s[9];
1001 if (pad >= len) {
1002 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1003 return 0;
1004 }
1005 alen -= (1 + pad);
1006 }
1007
1008 request_st * const h2r = &con->request;
1009 if (h2r->h2_rwin <= 0 && 0 != alen) { /*(always proceed if 0 == alen)*/
1010 /*(connection_state_machine_h2() must ensure con is rescheduled,
1011 * when backends consume data if con->read_queue is not empty,
1012 * whether or not con->fd has data to read from the network)*/
1013 /*(leave frame in cq to be re-read later)*/
1014 return 0;
1015 }
1016 /*(allow h2r->h2_rwin to dip below 0 so that entire frame is processed)*/
1017 /*(not worried about underflow while
1018 * SETTINGS_MAX_FRAME_SIZE is small (e.g. 16k or 32k) and
1019 * SETTINGS_MAX_CONCURRENT_STREAMS is small (h2c->r[8]))*/
1020 /*h2r->h2_rwin -= (int32_t)len;*//* update connection recv window (below) */
1021
1022 request_st *r = NULL;
1023 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
1024 request_st * const rr = h2c->r[i];
1025 if (rr->h2id != id) continue;
1026 r = rr;
1027 break;
1028 }
1029 chunkqueue * const cq = con->read_queue;
1030 if (NULL == r) {
1031 /* simplistic heuristic to discard additional DATA from recently-closed
1032 * streams (or half-closed (local)), where recently-closed here is
1033 * within 2-3 seconds of any (other) stream being half-closed (local)
1034 * or reset before that (other) stream received END_STREAM from peer.
1035 * (e.g. clients might fire off POST request followed by DATA,
1036 * and a response might be sent before processing DATA frames)
1037 * (id <= h2c->h2_cid) already checked above, else H2_E_PROTOCOL_ERROR
1038 * If the above conditions do not hold, then send GOAWAY to attempt to
1039 * reduce the chance of becoming an infinite data sink for misbehaving
1040 * clients, though remaining streams are still handled before the
1041 * connection is closed. */
1042 chunkqueue_mark_written(cq, 9+len);
1043 if (h2c->half_closed_ts + 2 >= log_monotonic_secs) {
1044 h2_send_window_update_unit(con, h2r, len); /*(h2r->h2_rwin)*/
1045 return 1;
1046 }
1047 else {
1048 if (!h2c->sent_goaway && 0 != alen)
1049 h2_send_goaway_e(con, H2_E_NO_ERROR);
1050 return 0;
1051 }
1052 }
1053
1054 if (r->h2state == H2_STATE_CLOSED
1055 || r->h2state == H2_STATE_HALF_CLOSED_REMOTE) {
1056 h2_send_rst_stream_id(id, con, H2_E_STREAM_CLOSED);
1057 chunkqueue_mark_written(cq, 9+len);
1058 h2_send_window_update_unit(con, h2r, len); /*(h2r->h2_rwin)*/
1059 return 1;
1060 }
1061
1062 if (r->h2_rwin <= 0 && 0 != alen) {/*(always proceed if 0==alen)*/
1063 /* note: r->h2_rwin is not adjusted (below) if max_request_size exceeded
1064 * in order to read and discard h2_rwin amount of data (below) */
1065 if (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN) {
1066 /*(connection_state_machine_h2() must ensure con is rescheduled,
1067 * when backends consume data if con->read_queue is not empty,
1068 * whether or not con->fd has data to read from the network)*/
1069 /*(leave frame in cq to be re-read later)*/
1070 return 0;
1071 }
1072 }
1073 /*(allow r->h2_rwin to dip below 0 so that entire frame is processed)*/
1074 /*(underflow will not occur (with reasonable SETTINGS_MAX_FRAME_SIZE used)
1075 * since windows updated elsewhere and data is streamed to temp files if
1076 * not FDEVENT_STREAM_REQUEST_BUFMIN)*/
1077 /*r->h2_rwin -= (int32_t)len;*/
1078 /*h2_send_window_update_unit(con, r, len);*//*(r->h2_rwin)*//*(see below)*/
1079
1080 /* avoid sending small WINDOW_UPDATE frames
1081 * Pre-emptively increase window size up to 16k (default max frame size)
1082 * and then defer small window updates until the excess is utilized. */
1083 h2_send_window_update_unit(con, h2r, len); /*(h2r->h2_rwin)*/
1084
1085 chunkqueue * const dst = &r->reqbody_queue;
1086
1087 if (r->reqbody_length >= 0 && r->reqbody_length < dst->bytes_in + alen) {
1088 /* data exceeds Content-Length specified (client mistake) */
1089 #if 0 /* truncate */
1090 alen = r->reqbody_length - dst->bytes_in;
1091 /*(END_STREAM may follow in 0-length DATA frame or HEADERS (trailers))*/
1092 #else /* reject */
1093 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
1094 chunkqueue_mark_written(cq, 9+len);
1095 return 1;
1096 #endif
1097 }
1098
1099 /*(accounting for mod_accesslog and mod_rrdtool)*/
1100 chunkqueue * const rq = &r->read_queue;
1101 rq->bytes_in += (off_t)alen;
1102 rq->bytes_out += (off_t)alen;
1103
1104 uint32_t wupd = 0;
1105 if (s[4] & H2_FLAG_END_STREAM) {
1106 if (!h2_recv_end_data(r, con, alen)) {
1107 chunkqueue_mark_written(cq, 9+len);
1108 return 1;
1109 }
1110 /*(accept data if H2_FLAG_END_STREAM was just received,
1111 * regardless of r->conf.max_request_size setting)*/
1112 }
1113 else if (0 == r->conf.max_request_size)
1114 wupd = len;
1115 else {
1116 /* r->conf.max_request_size is in kBytes */
1117 const off_t max_request_size = (off_t)r->conf.max_request_size << 10;
1118 off_t n = max_request_size - dst->bytes_in - (off_t)alen;
1119 int32_t rwin = r->h2_rwin - (int32_t)len;
1120 if (rwin < 0) rwin = 0;
1121 if (__builtin_expect( (n >= 0), 1)) /*(force wupd below w/ +16384)*/
1122 wupd=n>=rwin ? (n-=rwin)>(int32_t)len ? len : (uint32_t)n+16384 : 0;
1123 else if (-n > 65536 || 0 == r->http_status) {
1124 if (0 == r->http_status) {
1125 r->http_status = 413; /* Payload Too Large */
1126 r->handler_module = NULL;
1127 log_error(r->conf.errh, __FILE__, __LINE__,
1128 "request-size too long: %lld -> 413",
1129 (long long) (dst->bytes_in + (off_t)alen));
1130 }
1131 else { /* if (-n > 65536) */
1132 /* tolerate up to 64k additional data before resetting stream
1133 * (in excess to window updates sent to client)
1134 * (attempt to sink data in kernel buffers so 413 can be sent)*/
1135 h2_send_rst_stream_id(id, con, H2_E_STREAM_CLOSED);
1136 }
1137 chunkqueue_mark_written(cq, 9+len);
1138 return 1;
1139 }
1140 }
1141 /* r->h2_rwin is intentionally unmodified here so that some data in excess
1142 * of max_request_size received and discarded. If r->h2_rwin use is changed
1143 * in future and might reach 0, then also need to make sure that we do not
1144 * spin re-processing con while waiting for backend to consume request body.
1145 * stream rwin is always updated, potentially more than max_request_size so
1146 * that too much data is detected, instead of waiting for read timeout. */
1147 /*r->h2_rwin -= (int32_t)len;*/
1148 /*r->h2_rwin += (int32_t)wupd;*/
1149 /* avoid sending small WINDOW_UPDATE frames
1150 * Pre-emptively increase window size up to 16k (default max frame size)
1151 * and then defer small window updates until the excess is utilized.
1152 * This aims to reduce degenerative behavior from clients sending an
1153 * increasing number of tiny DATA frames. */
1154 /*(note: r->h2_rwin is not adjusted with r->h2_rwin_fudge factor)*/
1155 h2_send_window_update_unit(con, r, wupd);
1156
1157 chunkqueue_mark_written(cq, 9 + ((s[4] & H2_FLAG_PADDED) ? 1 : 0));
1158
1159 #if 0
1160 if (pad) {
1161 /* XXX: future optimization: if data is at end of chunk, then adjust
1162 * size of chunk by reducing c->mem->used to avoid copying chunk
1163 * when it is split (below) since the split would be due to padding
1164 * (also adjust cq->bytes_out)*/
1165 /*(might quickly check 9+len == cqlen if cqlen passed in as param)*/
1166 /*(then check if cq->last contains all of padding, or leave alone)*/
1167 /*(if handled here, then set pad = 0 here)*/
1168 }
1169 #endif
1170
1171 /*(similar decision logic to that in http_chunk_uses_tempfile())*/
1172 const chunk * const c = dst->last;
1173 if ((c && c->type == FILE_CHUNK && c->file.is_temp)
1174 || chunkqueue_length(dst) + alen > 65536) {
1175 log_error_st * const errh = r->conf.errh;
1176 if (0 != chunkqueue_steal_with_tempfiles(dst, cq, (off_t)alen, errh)) {
1177 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
1178 return 0;
1179 }
1180 }
1181 else
1182 chunkqueue_steal(dst, cq, (off_t)alen);
1183
1184 if (pad)
1185 chunkqueue_mark_written(cq, pad);
1186 return 1;
1187 }
1188
1189
1190 __attribute_cold__
1191 static uint32_t
h2_frame_cq_compact(chunkqueue * const cq,uint32_t len)1192 h2_frame_cq_compact (chunkqueue * const cq, uint32_t len)
1193 {
1194 /*(marked cold since most frames not expect to cross chunk boundary)*/
1195
1196 /* caller must guarantee that chunks in chunkqueue are all MEM_CHUNK */
1197 /* caller should check (chunkqueue_length(cq) >= len) before calling,
1198 * or should check that returned value >= len */
1199
1200 chunkqueue_compact_mem(cq, len);
1201 return buffer_clen(cq->first->mem) - (uint32_t)cq->first->offset;
1202 }
1203
1204
1205 __attribute_cold__
1206 static uint32_t
h2_recv_continuation(uint32_t n,uint32_t clen,const off_t cqlen,chunkqueue * const cq,connection * const con)1207 h2_recv_continuation (uint32_t n, uint32_t clen, const off_t cqlen, chunkqueue * const cq, connection * const con)
1208 {
1209 chunk *c = cq->first;
1210 uint8_t *s = (uint8_t *)(c->mem->ptr + c->offset);
1211 uint32_t m = n;
1212 uint32_t flags;
1213 h2con * const h2c = con->h2;
1214 const uint32_t fsize = h2c->s_max_frame_size;
1215 const uint32_t id = h2_u31(s+5);
1216 do {
1217 if (cqlen < n+9) return n+9; /* incomplete frame; go on */
1218 if (clen < n+9) {
1219 clen = h2_frame_cq_compact(cq, n+9);
1220 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1221 s = (uint8_t *)(c->mem->ptr + c->offset);
1222 }
1223 if (s[n+3] != H2_FTYPE_CONTINUATION) {
1224 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1225 return 0;
1226 }
1227 flags = s[n+4];
1228 const uint32_t flen = h2_u24(s+n);
1229 if (id != h2_u32(s+n+5)) {
1230 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1231 return 0;
1232 }
1233 if (flen > fsize) {
1234 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
1235 return 0;
1236 }
1237 n += 9+flen;
1238 if (n >= 65536) { /*(very oversized for hpack)*/
1239 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
1240 return 0;
1241 }
1242 if (clen < n) {
1243 clen = h2_frame_cq_compact(cq, n);
1244 if (clen < n) return n; /* incomplete frame; go on */
1245 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1246 s = (uint8_t *)(c->mem->ptr + c->offset);
1247 }
1248 } while (!(flags & H2_FLAG_END_HEADERS));
1249
1250 /* If some CONTINUATION frames were concatenated to earlier frames while
1251 * processing above, but END_HEADERS were not received, then the next time
1252 * data was read, initial frame size might exceed SETTINGS_MAX_FRAME_SIZE.
1253 * (This describes the current lighttpd implementation in h2_parse_frames())
1254 * While a flag could be set and checked to avoid this, such situations of
1255 * large HEADERS (and CONTINUATION) across multiple network reads is
1256 * expected to be rare. Reparse and concatenate below.
1257 *
1258 * Aside: why would the authors of RFC 7540 go through the trouble of
1259 * creating a CONTINUATION frame that must be special-cased when use of
1260 * CONTINUATION is so restricted e.g. no other intervening frames and
1261 * that HEADERS and PUSH_PROMISE HPACK must be parsed as a single block?
1262 * IMHO, it would have been simpler to avoid CONTINUATION entirely, and have
1263 * a special-case for HEADERS and PUSH_PROMISE to be allowed to exceed
1264 * SETTINGS_MAX_FRAME_SIZE with implementations providing a different limit.
1265 * While intermediates would not know such a limit of origin servers,
1266 * there could have been a reasonable default set with a different SETTINGS
1267 * parameter aimed just at HEADERS and PUSH_PROMISE. The parameter
1268 * SETTINGS_MAX_HEADER_LIST_SIZE could even have been (re)used, had it been
1269 * given a reasonable initial value instead of "unlimited", since HPACK
1270 * encoded headers are smaller than the HPACK decoded headers to which the
1271 * limit SETTINGS_MAX_HEADER_LIST_SIZE applies. */
1272
1273 n = m; /* reset n to beginning of first CONTINUATION frame */
1274
1275 /* Eliminate padding from first frame (HEADERS or PUSH_PROMISE) if PADDED */
1276 if (s[4] & H2_FLAG_PADDED) {
1277 const uint32_t plen = s[9];
1278 /* validate padding */
1279 const uint32_t flen = h2_u24(s);
1280 if (flen < 1 + plen + ((s[n+4] & H2_FLAG_PRIORITY) ? 5 : 0)) {
1281 /* Padding that exceeds the size remaining for the header block
1282 * fragment MUST be treated as a PROTOCOL_ERROR. */
1283 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1284 return 0;
1285 }
1286 /* set padding to 0 since we will overwrite padding in merge below */
1287 /* (alternatively, could memmove() 9 bytes of frame header over the
1288 * pad length octet, remove PADDED flag, add 1 to c->offset,
1289 * add 1 to s, subtract 1 from clen and subtract 1 from cqlen,
1290 * subtract 1 from n, add 1 to cq->bytes_out) */
1291 s[9] = 0;
1292 /* set offset to beginning of padding at end of first frame */
1293 m -= plen;
1294 /* XXX: layer violation; adjusts chunk.c internal accounting */
1295 cq->bytes_out += plen;
1296 }
1297
1298 #ifdef __COVERITY__
1299 /* Coverity does not notice that values used in s are checked.
1300 * Although silencing here, would prefer not to do so since doing so
1301 * disables Coverity from reporting questionable modifications which
1302 * might be made to the code in the future. */
1303 __coverity_tainted_data_sink__(s);
1304 #endif
1305
1306 do {
1307 const uint32_t flen = h2_u24(s+n);
1308 #ifdef __COVERITY__ /*flen values were checked in do {} while loop above*/
1309 if (clen < n+9+flen) {
1310 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
1311 return 0;
1312 }
1313 #endif
1314 flags = s[n+4];
1315 memmove(s+m, s+n+9, flen);
1316 m += flen;
1317 n += 9+flen;
1318 /* XXX: layer violation; adjusts chunk.c internal accounting */
1319 cq->bytes_out += 9;
1320 } while (!(flags & H2_FLAG_END_HEADERS));
1321 /* overwrite frame size */
1322 m -= 9; /*(temporarily remove frame header from len)*/
1323 s[0] = (m >> 16) & 0xFF;
1324 s[1] = (m >> 8) & 0xFF;
1325 s[2] = (m ) & 0xFF;
1326 m += 9;
1327 /* adjust chunk c->mem */
1328 if (n < clen) { /*(additional frames after CONTINUATION)*/
1329 memmove(s+m, s+n, clen-n);
1330 n = m + (clen-n);
1331 }
1332 else
1333 n = m;
1334 buffer_truncate(c->mem, n + (uint32_t)c->offset);
1335
1336 return m;
1337 }
1338
1339
1340 __attribute_cold__
1341 static request_st *
h2_recv_trailers_r(connection * const con,h2con * const h2c,const uint32_t id,const uint32_t flags)1342 h2_recv_trailers_r (connection * const con, h2con * const h2c, const uint32_t id, const uint32_t flags)
1343 {
1344 /* rant: RFC 7230 HTTP/1.1 trailer-part would have been much simpler
1345 * to support in RFC 7540 HTTP/2 as a distinct frame type rather than
1346 * HEADERS. As trailers are not known at the time the request is made,
1347 * reuse of such trailers is limited and so a theoretical TRAILERS frame
1348 * could have been implemented without HPACK encoding, and would have
1349 * been more straightforward to implement than overloading and having to
1350 * handle multiple cases for HEADERS. TRAILERS support could then also
1351 * be optional, like in HTTP/1.1 */
1352 request_st *r = NULL;
1353 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
1354 request_st * const rr = h2c->r[i];
1355 if (rr->h2id != id) continue;
1356 r = rr;
1357 break;
1358 }
1359 if (NULL == r) {
1360 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1361 return NULL;
1362 }
1363 if (r->h2state != H2_STATE_OPEN
1364 && r->h2state != H2_STATE_HALF_CLOSED_LOCAL) {
1365 h2_send_rst_stream(r, con, H2_E_STREAM_CLOSED);
1366 return NULL;
1367 }
1368 /* RFC 7540 is not explicit in restricting HEADERS (trailers) following
1369 * (optional) DATA frames, but in following HTTP/1.1, we limit to single
1370 * (optional) HEADERS (+ CONTINUATIONs) after (optional) DATA frame(s)
1371 * and require that the HEADERS frame set END_STREAM flag. */
1372 if (!(flags & H2_FLAG_END_STREAM)) {
1373 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
1374 return NULL;
1375 }
1376
1377 return h2_recv_end_data(r, con, 0) ? r : NULL;
1378 }
1379
1380
1381 static void
h2_parse_headers_frame(request_st * const restrict r,const unsigned char * psrc,const uint32_t plen,const int trailers)1382 h2_parse_headers_frame (request_st * const restrict r, const unsigned char *psrc, const uint32_t plen, const int trailers)
1383 {
1384 h2con * const h2c = r->con->h2;
1385 struct lshpack_dec * const restrict decoder = &h2c->decoder;
1386 const unsigned char * const endp = psrc + plen;
1387 http_header_parse_ctx hpctx;
1388 hpctx.hlen = 0;
1389 hpctx.pseudo = 1; /*(XXX: should be !trailers if handling trailers)*/
1390 hpctx.scheme = 0;
1391 hpctx.trailers = trailers;
1392 hpctx.max_request_field_size = r->conf.max_request_field_size;
1393 hpctx.http_parseopts = r->conf.http_parseopts;
1394 const int log_request_header = r->conf.log_request_header;
1395 int rc = LSHPACK_OK;
1396 /*buffer_clear(&r->target);*//*(initial state)*/
1397
1398 /*(h2_init_con() resized h2r->tmp_buf to 64k; shared with r->tmp_buf)*/
1399 buffer * const tb = r->tmp_buf;
1400 force_assert(tb->size >= 65536);/*(sanity check; remove in future)*/
1401 char * const tbptr = tb->ptr;
1402 const lsxpack_strlen_t tbsz = (tb->size <= LSXPACK_MAX_STRLEN)
1403 ? tb->size
1404 : LSXPACK_MAX_STRLEN;
1405
1406 /* note: #define LSHPACK_DEC_HTTP1X_OUTPUT 1 (default) configures
1407 * decoder to produce output in format: "field-name: value\r\n"
1408 * future: modify build system to define value to 0 in lshpack.h
1409 * against which lighttpd builds (or define value in build systems)
1410 * Then adjust code below to not use the HTTP/1.x compatibility,
1411 * as it is less efficient to copy into HTTP/1.1 request and reparse
1412 * than it is to directly parse each decoded header line. */
1413 lsxpack_header_t lsx;
1414 while (psrc < endp) {
1415 memset(&lsx, 0, sizeof(lsxpack_header_t));
1416 lsx.buf = tbptr;
1417 lsx.val_len = tbsz;
1418 rc = lshpack_dec_decode(decoder, &psrc, endp, &lsx);
1419 if (0 == lsx.name_len)
1420 rc = LSHPACK_ERR_BAD_DATA;
1421 if (__builtin_expect( (rc == LSHPACK_OK), 1)) {
1422 hpctx.k = lsx.buf+lsx.name_offset;
1423 hpctx.v = lsx.buf+lsx.val_offset;
1424 hpctx.klen = lsx.name_len;
1425 hpctx.vlen = lsx.val_len;
1426 /*assert(lsx.hpack_index < sizeof(lshpack_idx_http_header));*/
1427 hpctx.id = lshpack_idx_http_header[lsx.hpack_index];
1428
1429 if (log_request_header)
1430 log_error(r->conf.errh, __FILE__, __LINE__,
1431 "fd:%d id:%u rqst: %.*s: %.*s", r->con->fd, r->h2id,
1432 (int)hpctx.klen, hpctx.k, (int)hpctx.vlen, hpctx.v);
1433
1434 const int http_status = http_request_parse_header(r, &hpctx);
1435 if (__builtin_expect( (0 != http_status), 0)) {
1436 if (r->http_status == 0) /*might be set if processing trailers*/
1437 r->http_status = http_status;
1438 break;
1439 }
1440 }
1441 #if 0 /*(see catch-all below)*/
1442 /* Send GOAWAY (further below) (decoder state not maintained on error)
1443 * (see comments above why decoder state must be maintained) */
1444 /* XXX: future: could try to send :status 431 here
1445 * and reset other active streams in H2_STATE_OPEN */
1446 else if (rc == LSHPACK_ERR_MORE_BUF) {
1447 /* XXX: TODO if (r->conf.log_request_header_on_error) */
1448 r->http_status = 431; /* Request Header Fields Too Large */
1449 /*(try to avoid reading/buffering more data for this request)*/
1450 r->h2_rwin = 0; /*(out-of-sync with peer, but is error case)*/
1451 /*r->h2state = H2_STATE_HALF_CLOSED_REMOTE*/
1452 /* psrc was not advanced if LSHPACK_ERR_MORE_BUF;
1453 * processing must stop (since not retrying w/ larger buf)*/
1454 break;
1455 }
1456 #endif
1457 else { /* LSHPACK_ERR_BAD_DATA */
1458 /* GOAWAY with H2_E_PROTOCOL_ERROR is not specific enough
1459 * to tell peer to not retry request, so send RST_STREAM
1460 * (slightly more specific, but not by much) before GOAWAY*/
1461 /* LSHPACK_ERR_MORE_BUF is treated as an attack, send GOAWAY
1462 * (h2r->tmp_buf was resized to 64k in h2_init_con()) */
1463 request_h2error_t err = H2_E_COMPRESSION_ERROR;
1464 if (rc != LSHPACK_ERR_BAD_DATA) {
1465 /* LSHPACK_ERR_TOO_LARGE, LSHPACK_ERR_MORE_BUF */
1466 err = H2_E_PROTOCOL_ERROR;
1467 h2_send_rst_stream(r, r->con, err);
1468 }
1469 if (!h2c->sent_goaway && !trailers)
1470 h2c->h2_cid = r->h2id;
1471 h2_send_goaway_e(r->con, err);
1472 if (!trailers) {
1473 h2_retire_stream(r, r->con);
1474 return;
1475 }
1476 else {
1477 r->state = CON_STATE_ERROR;
1478 r->h2state = H2_STATE_CLOSED;
1479 }
1480 break;
1481 }
1482 }
1483
1484 hpctx.hlen += 2;
1485 r->rqst_header_len += hpctx.hlen;
1486 /*(accounting for mod_accesslog and mod_rrdtool)*/
1487 chunkqueue * const rq = &r->read_queue;
1488 rq->bytes_in += (off_t)hpctx.hlen;
1489 rq->bytes_out += (off_t)hpctx.hlen;
1490
1491 if (0 == r->http_status && LSHPACK_OK == rc && !trailers) {
1492 if (hpctx.pseudo)
1493 r->http_status =
1494 http_request_validate_pseudohdrs(r, hpctx.scheme,
1495 hpctx.http_parseopts);
1496 if (0 == r->http_status)
1497 http_request_headers_process_h2(r, r->con->proto_default_port);
1498 }
1499 }
1500
1501
1502 static int
1503 h2_recv_headers (connection * const con, uint8_t * const s, uint32_t flen)
1504 {
1505 #ifdef __COVERITY__
1506 /* Coverity does not notice that values used in s are checked.
1507 * Although silenced here, we would prefer not to do so, since silencing
1508 * prevents Coverity from reporting questionable modifications which
1509 * might be made to the code in the future. */
1510 __coverity_tainted_data_sink__(s);
1511 #endif
1512 request_st *r = NULL;
1513 h2con * const h2c = con->h2;
1514 const uint32_t id = h2_u31(s+5);
1515 #if 0 /*(included in (!(id & 1)) below)*/
1516 if (0 == id) { /* HEADERS, PUSH_PROMISE stream id must != 0 */
1517 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1518 return 0;
1519 }
1520 #endif
1521 if (!(id & 1)) { /* stream id from client must be odd */
1522 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1523 return 0;
1524 }
1525
1526 request_st * const h2r = &con->request;
1527 int trailers = 0;
1528
1529 if (id > h2c->h2_cid) {
1530 if (h2c->rused == sizeof(h2c->r)/sizeof(*h2c->r))
1531 return h2_send_refused_stream(id, con);
1532 /* Note: MUST process HPACK decode even if already sent GOAWAY.
1533 * This is necessary since there may be active streams not in
1534 * H2_STATE_HALF_CLOSED_REMOTE, e.g. H2_STATE_OPEN, still possibly
1535 * receiving DATA and, more relevantly, still might receive HEADERS
1536 * frame with trailers, for which the decoder state is required.
1537 * XXX: future might try to reduce other processing done if sent
1538 * GOAWAY, e.g. might avoid allocating (request_st *r) */
1539 r = h2_init_stream(h2r, con);
1540 r->h2id = id;
1541 r->h2state = (s[4] & H2_FLAG_END_STREAM)
1542 ? H2_STATE_HALF_CLOSED_REMOTE
1543 : H2_STATE_OPEN;
1544 r->state = CON_STATE_REQUEST_END;
1545 /* Note: timestamps here are updated only after receipt of entire header
1546 * (HEADERS frame might have been sent in multiple packets
1547 * and CONTINUATION frames may have been sent in multiple packets)
1548 * (affects high precision timestamp, if enabled)
1549 * (large sets of headers are not typical, and even when they do
1550 * occur, they will typically be sent within the same second)
1551 * (future: might keep high precision timestamp in h2con when first
1552 * packet of HEADERS or PUSH_PROMISE is received, and clear that
1553 * timestamp when frame + CONTINUATION(s) are complete (so that
1554 * re-read of initial frame does not overwrite the timestamp))
1555 */
1556 r->start_hp.tv_sec = log_epoch_secs;
1557 if (r->conf.high_precision_timestamps)
1558 log_clock_gettime_realtime(&r->start_hp);
1559 }
1560 else {
1561 r = h2_recv_trailers_r(con, h2c, id, s[4]); /* (cold code path) */
1562 if (NULL == r)
1563 return (h2c->sent_goaway > 0) ? 0 : 1;
1564 trailers = 1;
1565 }
1566
1567 const unsigned char *psrc = s + 9;
1568 uint32_t alen = flen;
1569 if (s[4] & H2_FLAG_PADDED) {
1570 ++psrc;
1571 const uint32_t pad = s[9];
1572 if (alen < 1 + pad) {
1573 /* Padding that exceeds the size remaining for the header block
1574 * fragment MUST be treated as a PROTOCOL_ERROR. */
1575 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1576 if (!trailers)
1577 h2_retire_stream(r, con);
1578 else {
1579 r->state = CON_STATE_ERROR;
1580 r->h2state = H2_STATE_CLOSED;
1581 }
1582 return 0;
1583 }
1584 alen -= (1 + pad); /*(alen is adjusted for PRIORITY below)*/
1585 }
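/* (worked example, hypothetical values: for a PADDED HEADERS frame with
 * flen 30 and a Pad Length octet of 5, psrc is advanced past the 1-byte
 * pad length and alen becomes 30 - (1 + 5) = 24, the size of the header
 * block fragment; the 5 trailing pad octets are never handed to the HPACK
 * decoder because alen no longer counts them) */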
1586 if (s[4] & H2_FLAG_PRIORITY) {
1587 /* XXX: TODO: handle PRIORITY (prio fields start at *psrc) */
1588 if (alen < 5 || (/*prio = */h2_u32(psrc)) == id) {
1589 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
1590 if (!trailers)
1591 h2_retire_stream(r, con);
1592 return 1;
1593 }
1594 #if 0
1595 uint32_t exclusive_dependency = (psrc[0] & 0x80) ? 1 : 0;
1596 /*(ignore dependency prid and exclusive_dependency,
1597 * and attempt to scale PRIORITY weight (weight+1 default is 16)
1598 * to PRIORITY_UPDATE (default urgency 3) (see h2_init_stream()))*/
1599 uint8_t weight = psrc[4] >> 2;
1600 r->h2_prio = ((weight < 8 ? weight : 7) << 1) | !0;
1601 #endif
1602 psrc += 5;
1603 alen -= 5;
1604 }
1605
1606 h2_parse_headers_frame(r, psrc, alen, trailers);
1607
1608 if (__builtin_expect( (trailers), 0))
1609 return 1;
1610
1611 #if 0 /*(handled in h2_parse_frames() as a connection error)*/
1612 /* not handled here:
1613 * r is invalid if h2_parse_headers_frame() HPACK decode error */
1614 if (s[3] == H2_FTYPE_PUSH_PROMISE) {
1615 /* Had to process HPACK to keep HPACK tables sync'd with peer but now
1616 * discard the request if PUSH_PROMISE, since not expected, as this code
1617 * is running as a server, not as a client.
1618 * XXX: future might try to reduce other processing done if
1619 * discarding, e.g. might avoid allocating (request_st *r) */
1620 /* rant: PUSH_PROMISE could have been a flag on HEADERS frame
1621 * instead of an independent frame type */
1622 r->http_status = 0;
1623 h2_retire_stream(r, con);
1624 }
1625 #endif
1626
1627 if (!h2c->sent_goaway) {
1628 h2c->h2_cid = id;
1629 if (!light_btst(r->rqst_htags, HTTP_HEADER_CONTENT_LENGTH))
1630 r->reqbody_length = (s[4] & H2_FLAG_END_STREAM) ? 0 : -1;
1631 #if 0
1632 else if (r->reqbody_length > 0 && (s[4] & H2_FLAG_END_STREAM)) {
1633 /*(handled in connection_handle_read_post_state())*/
1634 /* XXX: TODO if (r->conf.log_request_header_on_error) */
1635 r->http_status = 400; /* Bad Request */
1636 }
1637 #endif
1638 /*(lighttpd.conf config conditions not yet applied to request,
1639 * but do not increase window size if BUFMIN set in global config)*/
1640 if (r->reqbody_length /*(see h2_init_con() for session window)*/
1641 && !(r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN))
1642 h2_send_window_update(con, id, 131072); /*(add 128k)*/
1643
1644 if (light_btst(r->rqst_htags, HTTP_HEADER_PRIORITY)) {
1645 const buffer * const prio =
1646 http_header_request_get(r, HTTP_HEADER_PRIORITY,
1647 CONST_STR_LEN("priority"));
1648 r->h2_prio = h2_parse_priority_update(BUF_PTR_LEN(prio));
1649 }
1650 else {
1651 #if 0
1652 /* TODO: might check to match saved prid if PRIORITY_UPDATE frame
1653 * received prior to HEADERS, and apply urgency, incremental vals */
1654 if (0)
1655 r->h2_prio = x;
1656 else
1657 #endif
1658 { /*(quick peek at raw (non-normalized) r->target)*/
1659 /*(bump .js and .css to urgency 2; see h2_init_stream())*/
1660 const uint32_t len = buffer_clen(&r->target);
1661 const char * const p = r->target.ptr+len-4;
1662 if (len>=4 && (0==memcmp(p+1,".js",3)||0==memcmp(p,".css",4))) {
1663 r->h2_prio = (2 << 1) | !0; /*(urgency=2, incremental=0)*/
1664 http_header_response_set(r, HTTP_HEADER_PRIORITY,
1665 CONST_STR_LEN("priority"),
1666 CONST_STR_LEN("u=2"));
1667 }
1668 }
1669 }
1670 if (h2c->rused-1) /*(true if more than one active stream)*/
1671 h2_apply_priority_update(h2c, r, h2c->rused-1);
1672
1673 /* RFC 7540 Section 8. HTTP Message Exchanges
1674 * 8.1.2.6. Malformed Requests and Responses
1675 * For malformed requests, a server MAY send an HTTP
1676 * response prior to closing or resetting the stream.
1677 * However, h2spec expects stream PROTOCOL_ERROR.
1678 * (This is unfortunate, since we would rather send
1679 * 400 Bad Request which tells client *do not* retry
1680 * the bad request without modification)
1681 * https://github.com/summerwind/h2spec/issues/120
1682 * https://github.com/summerwind/h2spec/issues/121
1683 * https://github.com/summerwind/h2spec/issues/122
1684 */
1685 #if 0
1686 if (__builtin_expect( (400 == r->http_status), 0)) {
1687 h2_send_rst_stream(r, con, H2_E_PROTOCOL_ERROR);
1688 h2_retire_stream(r, con);
1689 /*(h2_retire_stream() invalidates r; must not use r below)*/
1690 }
1691 #endif
1692 }
1693 else if (h2c->h2_cid < id) {
1694 /* Had to process HPACK to keep HPACK tables sync'd with peer
1695 * but now discard the request if id is after id sent in GOAWAY.
1696 * XXX: future might try to reduce other processing done if
1697 * discarding, e.g. might avoid allocating (request_st *r) */
1698 r->http_status = 0;
1699 h2_retire_stream(r, con);
1700 }
1701
1702 return 1;
1703 }
1704
1705
1706 int
1707 h2_parse_frames (connection * const con)
1708 {
1709 /* read and process HTTP/2 frames from socket */
1710 h2con * const h2c = con->h2;
1711 chunkqueue * const cq = con->read_queue;
1712 /* initial max frame size is the minimum: 16k
1713 * (lighttpd does not currently increase max frame size)
1714 * (lighttpd does not currently decrease max frame size)
1715 * (XXX: If SETTINGS_MAX_FRAME_SIZE were increased and then decreased,
1716 * should accept the larger frame size until SETTINGS is ACK'd) */
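/* (hedged illustration: the 9-byte HTTP/2 frame header parsed in the loop
 * below is a 24-bit payload length (h2_u24(s)), 8-bit type (s[3]), 8-bit
 * flags (s[4]), and a 31-bit stream id with the reserved bit masked off
 * (h2_u31(s+5)); the PING frame bytes are a hypothetical example) */
#if 0
static const uint8_t ping_example[9+8] = {
0x00, 0x00, 0x08 /* 24-bit payload length (8) */
,H2_FTYPE_PING /* frame type */
,0x00 /* frame flags */
,0x00, 0x00, 0x00, 0x00 /* stream identifier (0 for PING) */
,0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 /* opaque PING payload */
};
#endif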
1717 const uint32_t fsize = h2c->s_max_frame_size;
1718 for (off_t cqlen = chunkqueue_length(cq); cqlen >= 9; ) {
1719 chunk *c = cq->first;
1720 /*assert(c->type == MEM_CHUNK);*/
1721 /* copy data if frame header crosses chunk boundary
1722 * future: be more efficient than blind full chunk copy */
1723 uint32_t clen = buffer_clen(c->mem) - c->offset;
1724 if (clen < 9) {
1725 clen = h2_frame_cq_compact(cq, 9);
1726 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1727 }
1728 uint8_t *s = (uint8_t *)(c->mem->ptr + c->offset);
1729 uint32_t flen = h2_u24(s);
1730 if (flen > fsize) {
1731 h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
1732 return 0;
1733 }
1734
1735 /*(handle PUSH_PROMISE as connection error further below)*/
1736 /*if (s[3] == H2_FTYPE_HEADERS || s[3] == H2_FTYPE_PUSH_PROMISE)*/
1737
1738 if (s[3] == H2_FTYPE_HEADERS) {
1739 if (cqlen < 9+flen) return 1; /* incomplete frame; go on */
1740 if (clen < 9+flen) {
1741 clen = h2_frame_cq_compact(cq, 9+flen);
1742 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1743 s = (uint8_t *)(c->mem->ptr + c->offset);
1744 }
1745
1746 if (!(s[4] & H2_FLAG_END_HEADERS)) {
1747 /* collect CONTINUATION frames (cold code path) */
1748 /* note: h2_recv_continuation() return value is overloaded
1749 * and the resulting clen is 9+flen of *concatenated* frames */
1750 clen = h2_recv_continuation(9+flen, clen, cqlen, cq, con);
1751 if (0 == clen) return 0;
1752 if (cqlen < clen) return 1; /* incomplete frames; go on */
1753 c = cq->first; /*(reload after h2_recv_continuation())*/
1754 s = (uint8_t *)(c->mem->ptr + c->offset);
1755 /* frame size was also updated and might (legitimately)
1756 * exceed SETTINGS_MAX_FRAME_SIZE, so do not test fsize again */
1757 flen = h2_u24(s);
1758 /* recalculate after CONTINUATION removed */
1759 cqlen = chunkqueue_length(cq);
1760 }
1761
1762 #ifdef __COVERITY__
1763 /* Coverity does not notice that values used in s are checked.
1764 * Although silenced here, we would prefer not to do so, since silencing
1765 * prevents Coverity from reporting questionable modifications which
1766 * might be made to the code in the future. */
1767 __coverity_tainted_data_sink__(s);
1768 #endif
1769
1770 int rc = h2_recv_headers(con, s, flen);
1771 cqlen -= (9+flen);
1772 if (rc >= 0)
1773 chunkqueue_mark_written(cq, 9+flen);
1774 if (rc <= 0)
1775 return 0;
1776 con->read_idle_ts = log_monotonic_secs;
1777 }
1778 else if (s[3] == H2_FTYPE_DATA) {
1779 /* future: might try to stream data for incomplete frames,
1780 * but that would require keeping additional state for partially
1781 * read frames, including cleaning up if errors occur.
1782 * Since well-behaved clients do not intentionally send partial
1783 * frames, and try to resend if socket buffers are full, this is
1784 * probably not a big concern in practice. */
1785 if (cqlen < 9+flen) return 1; /* incomplete frame; go on */
1786 con->read_idle_ts = log_monotonic_secs;
1787 /*(h2_recv_data() must consume frame from cq or else return 0)*/
1788 if (!h2_recv_data(con, s, flen))
1789 return 0;
1790 cqlen -= (9+flen);
1791 }
1792 else {
1793 /* frame types below are expected to be small
1794 * most frame types below have fixed (small) size
1795 * 4 bytes - WINDOW_UPDATE
1796 * 5 bytes - PRIORITY
1797 * 8 bytes - PING
1798 * 4 bytes - RST_STREAM
1799 * some are variable size
1800 * SETTINGS (6 * #settings; 6 defined in RFC 7540 Section 6.5)
1801 * GOAWAY (8 + optional additional debug data (variable))
1802 * XXX: might add sanity check for a max flen here,
1803 * before waiting to read partial frame
1804 * (fsize limit is still enforced above for all frames)
1805 */
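/* (hedged sketch of the sanity check mused about above; the bound is an
 * arbitrary illustration, not a value taken from the RFC or from this
 * codebase, and a real check would need to allow for GOAWAY debug data
 * and SETTINGS frames carrying many settings) */
#if 0
if (flen > 1024) {
h2_send_goaway_e(con, H2_E_FRAME_SIZE_ERROR);
return 0;
}
#endif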
1806 if (cqlen < 9+flen) return 1; /* incomplete frame; go on */
1807 if (clen < 9+flen) {
1808 clen = h2_frame_cq_compact(cq, 9+flen); UNUSED(clen);
1809 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1810 s = (uint8_t *)(c->mem->ptr + c->offset);
1811 }
1812 switch (s[3]) { /* frame type */
1813 case H2_FTYPE_WINDOW_UPDATE:
1814 h2_recv_window_update(con, s, flen);
1815 break;
1816 case H2_FTYPE_PRIORITY_UPDATE:
1817 h2_recv_priority_update(con, s, flen);
1818 break;
1819 case H2_FTYPE_SETTINGS:
1820 h2_recv_settings(con, s, flen);
1821 break;
1822 case H2_FTYPE_PING:
1823 h2_recv_ping(con, s, flen);
1824 break;
1825 case H2_FTYPE_RST_STREAM:
1826 h2_recv_rst_stream(con, s, flen);
1827 break;
1828 case H2_FTYPE_GOAWAY:
1829 if (!h2_recv_goaway(con, s, flen)) return 0;
1830 break;
1831 case H2_FTYPE_PRIORITY:
1832 h2_recv_priority(con, s, flen);
1833 break;
1834 case H2_FTYPE_PUSH_PROMISE: /*not expected from client*/
1835 case H2_FTYPE_CONTINUATION: /*handled with HEADERS*/
1836 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1837 return 0;
1838 default: /* ignore unknown frame types */
1839 break;
1840 }
1841 cqlen -= (9+flen);
1842 chunkqueue_mark_written(cq, 9+flen);
1843 }
1844
1845 if (h2c->sent_goaway > 0) return 0;
1846 }
1847
1848 return 1;
1849 }
1850
1851
1852 int
1853 h2_want_read (connection * const con)
1854 {
1855 chunkqueue * const cq = con->read_queue;
1856 if (chunkqueue_is_empty(cq)) return 1;
1857
1858 /* check for partial frame */
1859 const off_t cqlen = chunkqueue_length(cq);
1860 if (cqlen < 9) return 1;
1861 chunk *c = cq->first;
1862 uint32_t clen = buffer_clen(c->mem) - c->offset;
1863 if (clen < 9) {
1864 clen = h2_frame_cq_compact(cq, 9);
1865 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1866 }
1867 uint8_t *s = (uint8_t *)(c->mem->ptr + c->offset);
1868 uint32_t flen = h2_u24(s);
1869 if (clen < 9+flen) return 1;
1870
1871 /* check if not HEADERS, or if HEADERS has END_HEADERS flag */
1872 if (s[3] != H2_FTYPE_HEADERS || (s[4] & H2_FLAG_END_HEADERS))
1873 return 0;
1874
1875 /* check for partial CONTINUATION frames */
1876 for (uint32_t n = 9+flen; cqlen >= n+9; n += 9+flen) {
1877 if (clen < n+9) {
1878 clen = h2_frame_cq_compact(cq, n+9);
1879 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1880 s = (uint8_t *)(c->mem->ptr + c->offset);
1881 }
1882 flen = h2_u24(s+n);
1883 if (cqlen < n+9+flen) return 1; /* incomplete frame; go on */
1884 if (s[n+4] & H2_FLAG_END_HEADERS) return 0;
1885 }
1886
1887 return 1;
1888 }
1889
1890
1891 static int
1892 h2_recv_client_connection_preface (connection * const con)
1893 {
1894 /* check if the client Connection Preface (24 bytes) has been received
1895 * (initial SETTINGS frame should immediately follow, but is not checked) */
1896 chunkqueue * const cq = con->read_queue;
1897 if (chunkqueue_length(cq) < 24) {
1898 chunk * const c = cq->first;
1899 if (c && buffer_clen(c->mem) - c->offset >= 4) {
1900 const char * const s = c->mem->ptr + c->offset;
1901 if (s[0]!='P'||s[1]!='R'||s[2]!='I'||s[3]!=' ') {
1902 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1903 return 1; /* error; done receiving connection preface */
1904 }
1905 }
1906 return 0; /*(not ready yet)*/
1907 }
1908
1909 static const char h2preface[] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
1910 chunk *c = cq->first;
1911 const uint32_t clen = buffer_clen(c->mem) - c->offset;
1912 if (clen < 24) h2_frame_cq_compact(cq, 24);
1913 c = cq->first; /*(reload after h2_frame_cq_compact())*/
1914 const uint8_t * const s = (uint8_t *)(c->mem->ptr + c->offset);
1915 if (0 == memcmp(s, h2preface, 24)) /* (24 == sizeof(h2preface)-1) */
1916 chunkqueue_mark_written(cq, 24);
1917 else
1918 h2_send_goaway_e(con, H2_E_PROTOCOL_ERROR);
1919 return 1; /* done receiving connection preface (even if error occurred) */
1920 }
1921
1922
1923 __attribute_cold__
1924 static int
1925 h2_read_client_connection_preface (struct connection * const con, chunkqueue * const cq, off_t max_bytes)
1926 {
1927 /* temporary con->network_read() filter until connection preface received */
1928
1929 /*(alternatively, func ptr could be saved in an element in (h2con *))*/
1930 void ** const hctx = con->plugin_ctx+0; /*(0 idx used for h2)*/
1931 int(* const network_read)(struct connection *, chunkqueue *, off_t) =
1932 (int(*)(struct connection *, chunkqueue *, off_t))(uintptr_t)(*hctx);
1933 if (max_bytes < 24) max_bytes = 24; /*(should not happen)*/
1934 int rc = (network_read)(con, cq, max_bytes);
1935 if (NULL == con->h2) return rc; /*(unexpected; already cleaned up)*/
1936 if (-1 != rc && h2_recv_client_connection_preface(con)) {
1937 con->network_read = network_read;
1938 *hctx = NULL;
1939 /*(intentionally update timestamp only after reading preface complete)*/
1940 con->read_idle_ts = log_monotonic_secs;
1941 }
1942 return rc;
1943 }
1944
1945
1946 void
1947 h2_init_con (request_st * const restrict h2r, connection * const restrict con, const buffer * const restrict http2_settings)
1948 {
1949 h2con * const h2c = con->h2 = ck_calloc(1, sizeof(h2con));
1950 con->read_idle_ts = log_monotonic_secs;
1951 con->keep_alive_idle = h2r->conf.max_keep_alive_idle;
1952
1953 h2r->h2_rwin = 262144; /* h2 connection recv window (256k)*/
1954 h2r->h2_swin = 65535; /* h2 connection send window */
1955 h2r->h2_rwin_fudge = 0;
1956 /* settings sent from peer */ /* initial values */
1957 h2c->s_header_table_size = 4096; /* SETTINGS_HEADER_TABLE_SIZE */
1958 h2c->s_enable_push = 1; /* SETTINGS_ENABLE_PUSH */
1959 h2c->s_max_concurrent_streams= ~0u; /* SETTINGS_MAX_CONCURRENT_STREAMS */
1960 h2c->s_initial_window_size = 65536; /* SETTINGS_INITIAL_WINDOW_SIZE */
1961 h2c->s_max_frame_size = 16384; /* SETTINGS_MAX_FRAME_SIZE */
1962 h2c->s_max_header_list_size = ~0u; /* SETTINGS_MAX_HEADER_LIST_SIZE */
1963 h2c->sent_settings = log_monotonic_secs;/*(send SETTINGS below)*/
1964
1965 lshpack_dec_init(&h2c->decoder);
1966 lshpack_enc_init(&h2c->encoder);
1967 lshpack_enc_use_hist(&h2c->encoder, 1);
1968
1969 if (http2_settings) /*(if Upgrade: h2c)*/
1970 h2_parse_frame_settings(con, (uint8_t *)BUF_PTR_LEN(http2_settings));
1971
1972 static const uint8_t h2settings[] = { /*(big-endian numbers)*/
1973 /* SETTINGS */
1974 0x00, 0x00, 0x1e /* frame length */ /* 5 * (6 bytes per setting) */
1975 ,H2_FTYPE_SETTINGS /* frame type */
1976 ,0x00 /* frame flags */
1977 ,0x00, 0x00, 0x00, 0x00 /* stream identifier */
1978 ,0x00, H2_SETTINGS_MAX_CONCURRENT_STREAMS
1979 ,0x00, 0x00, 0x00, 0x08 /* 8 */
1980 #if 0 /* ? explicitly disable dynamic table ? (and adjust frame length) */
1981 /* If this is sent, must wait until peer sends SETTINGS with ACK
1982 * before disabling dynamic table in HPACK decoder */
1983 /*(before calling lshpack_dec_set_max_capacity(&h2c->decoder, 0))*/
1984 ,0x00, H2_SETTINGS_HEADER_TABLE_SIZE
1985 ,0x00, 0x00, 0x00, 0x00 /* 0 */
1986 #endif
1987 #if 0 /* ? explicitly disable push ? (and adjust frame length) */
1988 ,0x00, H2_SETTINGS_ENABLE_PUSH
1989 ,0x00, 0x00, 0x00, 0x00 /* 0 */
1990 #endif
1991 ,0x00, H2_SETTINGS_INITIAL_WINDOW_SIZE /*(must match in h2_init_stream())*/
1992 ,0x00, 0x01, 0x00, 0x00 /* 65536 *//*multiple of SETTINGS_MAX_FRAME_SIZE*/
1993 #if 0 /* ? increase from default (16384) ? (and adjust frame length) */
1994 ,0x00, H2_SETTINGS_MAX_FRAME_SIZE
1995 ,0x00, 0x00, 0x80, 0x00 /* 32768 */
1996 #endif
1997 ,0x00, H2_SETTINGS_MAX_HEADER_LIST_SIZE
1998 ,0x00, 0x00, 0xFF, 0xFF /* 65535 */
1999 ,0x00, H2_SETTINGS_ENABLE_CONNECT_PROTOCOL
2000 ,0x00, 0x00, 0x00, 0x01 /* 1 */
2001 ,0x00, H2_SETTINGS_NO_RFC7540_PRIORITIES
2002 ,0x00, 0x00, 0x00, 0x01 /* 1 */
2003
2004 /* WINDOW_UPDATE */
2005 ,0x00, 0x00, 0x04 /* frame length */
2006 ,H2_FTYPE_WINDOW_UPDATE /* frame type */
2007 ,0x00 /* frame flags */
2008 ,0x00, 0x00, 0x00, 0x00 /* stream identifier */
2009 ,0x00, 0x03, 0x00, 0x01 /* 196609 *//*(increase connection rwin to 256k)*/
2010 };
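/* (worked arithmetic, from the values above: the connection starts with
 * the default 65535-octet recv window; the WINDOW_UPDATE increment
 * 0x00030001 == 196609, and 65535 + 196609 == 262144, matching the 256k
 * h2r->h2_rwin set earlier in this function) */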
2011
2012 chunkqueue_append_mem(con->write_queue,
2013 (const char *)h2settings, sizeof(h2settings));
2014
2015 if (!h2_recv_client_connection_preface(con)) {
2016 /*(alternatively, func ptr could be saved in an element in (h2con *))*/
2017 con->plugin_ctx[0] = (void *)(uintptr_t)con->network_read;
2018 con->network_read = h2_read_client_connection_preface;
2019 /* note: no steps taken to reset con->network_read() on error
2020 * as con->network_read() is always set in connection_accepted() */
2021 }
2022
2023 buffer_string_prepare_copy(h2r->tmp_buf, 65535);
2024 }
2025
2026
2027 static void
2028 h2_send_hpack (request_st * const r, connection * const con, const char *data, uint32_t dlen, const uint32_t flags)
2029 {
2030 union {
2031 uint8_t c[12];
2032 uint32_t u[3]; /*(alignment)*/
2033 } headers = { { /*(big-endian numbers)*/
2034 0x00, 0x00, 0x00 /* padding for alignment; do not send */
2035 /* HEADERS */
2036 ,0x00, 0x00, 0x00 /* frame length (fill in below) */
2037 ,H2_FTYPE_HEADERS /* frame type */
2038 ,(uint8_t)flags /* frame flags (e.g. END_STREAM for trailers) */
2039 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
2040 } };
2041
2042 headers.u[2] = htonl(r->h2id);
2043
2044 if (flags & H2_FLAG_END_STREAM) {
2045 /* step r->h2state
2046 * H2_STATE_OPEN -> H2_STATE_HALF_CLOSED_LOCAL
2047 * or
2048 * H2_STATE_HALF_CLOSED_REMOTE -> H2_STATE_CLOSED */
2049 #if 1
2050 ++r->h2state;
2051 #else
2052 r->h2state = (r->h2state == H2_STATE_HALF_CLOSED_REMOTE)
2053 ? H2_STATE_CLOSED
2054 : H2_STATE_HALF_CLOSED_LOCAL;
2055 #endif
2056 }
2057
2058 /* similar to h2_send_data(), but unlike DATA frames there is a HEADERS
2059 * frame potentially followed by CONTINUATION frame(s) here, and the final
2060 * HEADERS or CONTINUATION frame here has END_HEADERS flag set.
2061 * For trailers, END_STREAM flag is set on HEADERS frame. */
2062
2063 /*(approximate space needed for frames (header + payload)
2064 * with slight over-estimate of 16 bytes per frame header (> 9)
2065 * and minimum SETTINGS_MAX_FRAME_SIZE of 16k (could be larger)
2066 * (dlen >> 14)+1 is num 16k frames needed, multiplied by 16 bytes
2067 * per frame can be approximated with (dlen>>10) + 9)*/
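/* (worked example, hypothetical dlen: for dlen == 40000 the loop below
 * emits ceil(40000/16384) == 3 frames, i.e. 27 bytes of real frame-header
 * overhead, while the estimate reserves (40000>>10) + 9 == 48 extra bytes,
 * so the open_sz allocation of 40048 comfortably covers the 40027 needed) */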
2068 buffer * const b =
2069 chunkqueue_append_buffer_open_sz(con->write_queue, dlen + (dlen>>10) + 9);
2070 char * restrict ptr = b->ptr;
2071 h2con * const h2c = con->h2;
2072 const uint32_t fsize = h2c->s_max_frame_size;
2073 do {
2074 const uint32_t len = dlen < fsize ? dlen : fsize;
2075 headers.c[3] = (len >> 16) & 0xFF; /*(off +3 to skip over align pad)*/
2076 headers.c[4] = (len >> 8) & 0xFF;
2077 headers.c[5] = (len ) & 0xFF;
2078 if (len == dlen)
2079 headers.c[7] |= H2_FLAG_END_HEADERS;
2080 #if 0
2081 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align pad)*/
2082 (const char *)headers.c+3, sizeof(headers)-3);
2083 chunkqueue_append_mem(con->write_queue, data, len);
2084 #else
2085 memcpy(ptr, headers.c+3, sizeof(headers)-3);
2086 memcpy(ptr+sizeof(headers)-3, data, len);
2087 ptr += len + sizeof(headers)-3;
2088 #endif
2089 data += len;
2090 dlen -= len;
2091 /*(include H2_FLAG_END_STREAM in HEADERS frame, not CONTINUATION)*/
2092 headers.c[6] = H2_FTYPE_CONTINUATION; /*(if additional frames needed)*/
2093 headers.c[7] = 0x00; /*(off +3 to skip over align pad)*/
2094 } while (dlen);
2095 buffer_truncate(b, (uint32_t)(ptr - b->ptr));
2096 chunkqueue_append_buffer_commit(con->write_queue);
2097 }
2098
2099
2100 __attribute_cold__
2101 __attribute_noinline__
2102 static void
2103 h2_log_response_header_lsx(request_st * const r, const lsxpack_header_t * const lsx)
2104 {
2105 log_error(r->conf.errh, __FILE__, __LINE__,
2106 "fd:%d id:%u resp: %.*s: %.*s", r->con->fd, r->h2id,
2107 (int)lsx->name_len, lsx->buf + lsx->name_offset,
2108 (int)lsx->val_len, lsx->buf + lsx->val_offset);
2109 }
2110
2111
2112 __attribute_cold__
2113 static void
2114 h2_log_response_header(request_st * const r, const int len, const char * const hdr)
2115 {
2116 log_error(r->conf.errh, __FILE__, __LINE__,
2117 "fd:%d id:%u resp: %.*s", r->con->fd, r->h2id, len, hdr);
2118 }
2119
2120
2121 void
2122 h2_send_headers (request_st * const r, connection * const con)
2123 {
2124 /*(set keep_alive_idle; out-of-place and non-event for most configs,
2125 * but small attempt to (maybe) preserve behavior for specific configs)*/
2126 con->keep_alive_idle = r->conf.max_keep_alive_idle;
2127
2128 /* specialized version of http_response_write_header(); send headers
2129 * directly to HPACK encoder, rather than double-buffering in chunkqueue */
2130
2131 if (304 == r->http_status
2132 && light_btst(r->resp_htags, HTTP_HEADER_CONTENT_ENCODING))
2133 http_header_response_unset(r, HTTP_HEADER_CONTENT_ENCODING,
2134 CONST_STR_LEN("Content-Encoding"));
2135
2136 /*(h2_init_con() resized h2r->tmp_buf to 64k; shared with r->tmp_buf)*/
2137 buffer * const tb = r->tmp_buf;
2138 force_assert(tb->size >= 65536);/*(sanity check; remove in future)*/
2139 unsigned char *dst = (unsigned char *)tb->ptr;
2140 unsigned char * const dst_end = (unsigned char *)tb->ptr + tb->size;
2141
2142 h2con * const h2c = con->h2;
2143 struct lshpack_enc * const encoder = &h2c->encoder;
2144 lsxpack_header_t lsx;
2145 uint32_t alen = 7+3+4; /* ":status: xxx\r\n" */
2146 const int log_response_header = r->conf.log_response_header;
2147 const int resp_header_repeated = r->resp_header_repeated;
2148
2149 char status[12] = ":status: 200";
2150
2151 memset(&lsx, 0, sizeof(lsxpack_header_t));
2152 lsx.buf = status;
2153 lsx.name_offset = 0;
2154 lsx.name_len = 7;
2155 lsx.val_offset = 9;
2156 lsx.val_len = 3;
2157 if (__builtin_expect( (200 == r->http_status), 1)) {
2158 lsx.hpack_index = LSHPACK_HDR_STATUS_200;
2159 }
2160 else {
2161 int x = r->http_status; /*(expect status < 1000; should be [100-599])*/
2162 switch (x) {
2163 /*case 200: lsx.hpack_index = LSHPACK_HDR_STATUS_200; break;*/
2164 case 204: lsx.hpack_index = LSHPACK_HDR_STATUS_204; break;
2165 case 206: lsx.hpack_index = LSHPACK_HDR_STATUS_206; break;
2166 case 304: lsx.hpack_index = LSHPACK_HDR_STATUS_304; break;
2167 case 400: lsx.hpack_index = LSHPACK_HDR_STATUS_400; break;
2168 case 404: lsx.hpack_index = LSHPACK_HDR_STATUS_404; break;
2169 case 500: lsx.hpack_index = LSHPACK_HDR_STATUS_500; break;
2170 default:
2171 break;
2172 }
2173 int nx;
2174 status[11] += (x - (nx = x/10) * 10); /* (x % 10) */
2175 x = nx;
2176 status[10] += (x - (nx = x/10) * 10); /* (x / 10 % 10) */
2177 status[9] = '0' + nx; /* (x / 100) */
2178 }
2179
2180 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2181 if (dst == (unsigned char *)tb->ptr) {
2182 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2183 return;
2184 }
2185
2186 if (log_response_header)
2187 h2_log_response_header(r, 12, status);
2188
2189 /* add all headers */
2190 data_string * const * const restrict hdata =
2191 (data_string * const *)r->resp_headers.data;
2192 for (uint32_t i = 0, used = r->resp_headers.used; i < used; ++i) {
2193 data_string * const ds = hdata[i];
2194 const uint32_t klen = buffer_clen(&ds->key);
2195 const uint32_t vlen = buffer_clen(&ds->value);
2196 if (__builtin_expect( (0 == klen), 0)) continue;
2197 if (__builtin_expect( (0 == vlen), 0)) continue;
2198 alen += klen + vlen + 4;
2199
2200 if (alen > LSXPACK_MAX_STRLEN) {
2201 /* ls-hpack default limit (UINT16_MAX) is per-line, due to field
2202 * sizes of lsx.name_offset,lsx.name_len,lsx.val_offset,lsx.val_len
2203 * However, similar to elsewhere, limit total size of expanded
2204 * headers to (very generous) 64k - 1. Peers might allow less. */
2205 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2206 return;
2207 }
2208
2209 /* HTTP/2 requires lowercase keys
2210 * ls-hpack requires key and value be in same buffer
2211 * Since keys are typically short, append (and lowercase) key onto
2212 * end of value buffer, following '\0' after end of value, and
2213 * without modifying ds->value.used or overwriting '\0' */
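/* (illustrative layout, hypothetical header: for "Content-Type: text/html"
 * with vlen == 9 and klen == 12, the copy below leaves ds->value.ptr
 * holding "text/html\0content-type", so lsx.val_offset == 0,
 * lsx.val_len == 9, lsx.name_offset == vlen+1 == 10, lsx.name_len == 12,
 * satisfying the ls-hpack same-buffer requirement without touching the
 * stored value) */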
2214 char * const v =
2215 __builtin_expect( (buffer_string_space(&ds->value) >= klen), 1)
2216 ? ds->value.ptr+vlen+1 /*perf: inline check before call*/
2217 : buffer_string_prepare_append(&ds->value, klen)+1;
2218 if (__builtin_expect( (ds->ext != HTTP_HEADER_OTHER), 1)) {
2219 memcpy(v, http_header_lc[ds->ext], klen);
2220 }
2221 else {
2222 const char * const restrict k = ds->key.ptr;
2223 if ((k[0] & 0xdf) == 'X' && http_response_omit_header(r, ds)) {
2224 alen -= klen + vlen + 4;
2225 continue;
2226 }
2227 for (uint32_t j = 0; j < klen; ++j)
2228 v[j] = !light_isupper(k[j]) ? k[j] : (k[j] | 0x20);
2229 }
2230
2231 uint32_t voff = 0;
2232 const char *n;
2233 lsx.buf = ds->value.ptr;
2234 do {
2235 n = !resp_header_repeated
2236 ? NULL
2237 : memchr(lsx.buf+voff, '\n', vlen - voff);
2238
2239 memset(&lsx, 0, sizeof(lsxpack_header_t));
2240 lsx.hpack_index = http_header_lshpack_idx[ds->ext];
2241 lsx.buf = ds->value.ptr;
2242 lsx.name_offset = vlen+1;
2243 lsx.name_len = klen;
2244 lsx.val_offset = voff;
2245 if (NULL == n)
2246 lsx.val_len = vlen - voff;
2247 else {
2248 /* multiple headers (same field-name) separated by "\r\n"
2249 * and then "field-name: " (see http_header_response_insert())*/
2250 voff = (uint32_t)(n + 1 - lsx.buf);
2251 lsx.val_len = voff - 2 - lsx.val_offset; /*(-2 for "\r\n")*/
2252 voff += klen + 2;
2253 }
2254
2255 if (log_response_header)
2256 h2_log_response_header_lsx(r, &lsx);
2257
2258 unsigned char * const dst_in = dst;
2259 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2260 if (dst == dst_in) {
2261 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2262 return;
2263 }
2264 } while (n);
2265 }
2266
2267 if (!light_btst(r->resp_htags, HTTP_HEADER_DATE)) {
2268 /* HTTP/1.1 and later requires a Date: header */
2269 /* "date: " 6-chars + 30-chars for "%a, %d %b %Y %T GMT" + '\0' */
2270 static unix_time64_t tlast = 0;
2271 static char tstr[36] = "date: ";
2272
2273 memset(&lsx, 0, sizeof(lsxpack_header_t));
2274 lsx.buf = tstr;
2275 lsx.name_offset = 0;
2276 lsx.name_len = 4;
2277 lsx.val_offset = 6;
2278 lsx.val_len = 29;
2279 lsx.hpack_index = LSHPACK_HDR_DATE;
2280
2281 /* cache the generated timestamp */
2282 const unix_time64_t cur_ts = log_epoch_secs;
2283 if (__builtin_expect ( (tlast != cur_ts), 0))
2284 http_date_time_to_str(tstr+6, sizeof(tstr)-6, (tlast = cur_ts));
2285
2286 alen += 35+2;
2287
2288 if (log_response_header)
2289 h2_log_response_header(r, 35, tstr);
2290
2291 unsigned char * const dst_in = dst;
2292 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2293 if (dst == dst_in) {
2294 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2295 return;
2296 }
2297 }
2298
2299 if (!light_btst(r->resp_htags, HTTP_HEADER_SERVER) && r->conf.server_tag) {
2300 /*("server" is appended after '\0' in r->conf.server_tag at startup)*/
2301 const uint32_t vlen = buffer_clen(r->conf.server_tag);
2302
2303 alen += 6+vlen+4;
2304
2305 memset(&lsx, 0, sizeof(lsxpack_header_t));
2306 lsx.buf = r->conf.server_tag->ptr;
2307 lsx.name_offset = vlen+1;
2308 lsx.name_len = 6;
2309 lsx.val_offset = 0;
2310 lsx.val_len = vlen;
2311 lsx.hpack_index = LSHPACK_HDR_SERVER;
2312
2313 if (log_response_header)
2314 h2_log_response_header_lsx(r, &lsx);
2315
2316 unsigned char * const dst_in = dst;
2317 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2318 if (dst == dst_in) {
2319 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2320 return;
2321 }
2322 }
2323
2324 alen += 2; /* "virtual" blank line ("\r\n") ending headers */
2325 r->resp_header_len = alen;
2326 /*(accounting for mod_accesslog and mod_rrdtool)*/
2327 chunkqueue * const wq = &r->write_queue;
2328 wq->bytes_in += (off_t)alen;
2329 wq->bytes_out += (off_t)alen;
2330
2331 const uint32_t dlen = (uint32_t)((char *)dst - tb->ptr);
2332 const uint32_t flags =
2333 (r->resp_body_finished && chunkqueue_is_empty(&r->write_queue))
2334 ? H2_FLAG_END_STREAM
2335 : 0;
2336 h2_send_hpack(r, con, tb->ptr, dlen, flags);
2337 }
2338
2339
2340 __attribute_cold__
2341 __attribute_noinline__
2342 static void
2343 h2_send_headers_block (request_st * const r, connection * const con, const char * const hdrs, const uint32_t hlen, uint32_t flags)
2344 {
2345 unsigned short hoff[8192]; /* max num header lines + 3; 16k on stack */
2346 hoff[0] = 1; /* number of lines */
2347 hoff[1] = 0; /* base offset for all lines */
2348 /*hoff[2] = ...;*/ /* offset from base for 2nd line */
2349 uint32_t rc = http_header_parse_hoff(hdrs, hlen, hoff);
2350 if (0 == rc || rc > USHRT_MAX || hoff[0] >= sizeof(hoff)/sizeof(hoff[0])-1
2351 || 1 == hoff[0]) { /*(initial blank line (should not happen))*/
2352 /* error if headers incomplete or too many header fields */
2353 log_error(r->conf.errh, __FILE__, __LINE__,
2354 "oversized response-header");
2355 hoff[0] = 1;
2356 hoff[1] = 0;
2357 if (http_header_parse_hoff(CONST_STR_LEN(":status: 500\r\n\r\n"),hoff)){
2358 /*(ignore for coverity; static string is successfully parsed)*/
2359 }
2360 }
2361
2362 /*(h2_init_con() resized h2r->tmp_buf to 64k; shared with r->tmp_buf)*/
2363 buffer * const tb = r->tmp_buf;
2364 force_assert(tb->size >= 65536);/*(sanity check; remove in future)*/
2365 unsigned char *dst = (unsigned char *)tb->ptr;
2366 unsigned char * const dst_end = (unsigned char *)tb->ptr + tb->size;
2367
2368 h2con * const h2c = con->h2;
2369 struct lshpack_enc * const encoder = &h2c->encoder;
2370 lsxpack_header_t lsx;
2371
2372 int i = 1;
2373 if (hdrs[0] == ':') {
2374 i = 2;
2375 /* expect the first line to contain ":status: ..." if a pseudo-header
2376 * is present; expect a single pseudo-header for headers, none for trailers */
2377 /*assert(0 == memcmp(hdrs, ":status: ", sizeof(":status: ")-1));*/
2378 memset(&lsx, 0, sizeof(lsxpack_header_t));
2379 *(const char **)&lsx.buf = hdrs;
2380 lsx.name_offset = 0;
2381 lsx.name_len = sizeof(":status")-1;
2382 lsx.val_offset = lsx.name_len + 2;
2383 lsx.val_len = 3;
2384 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2385 if (dst == (unsigned char *)tb->ptr) {
2386 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2387 return;
2388 }
2389 }
2390
2391 /*(note: not expecting any other pseudo-headers)*/
2392
2393 /* note: expects field-names are lowercased (http_response_write_header())*/
2394
2395 for (; i < hoff[0]; ++i) {
2396 const char *k = hdrs + ((i > 1) ? hoff[i] : 0);
2397 const char *end = hdrs + hoff[i+1];
2398 const char *v = memchr(k, ':', end-k);
2399 /* XXX: DOES NOT handle line wrapping (which is deprecated by RFCs)
2400 * (not expecting line wrapping; not produced internally by lighttpd,
2401 * though possible from backends or with custom lua code)*/
2402 if (NULL == v || k == v) continue;
2403 uint32_t klen = v - k;
2404 if (0 == klen) continue;
2405 do { ++v; } while (*v == ' ' || *v == '\t'); /*(expect single ' ')*/
2406 #ifdef __COVERITY__
2407 /*(k has at least .:\n by now, so end[-2] valid)*/
2408 force_assert(end >= k + 2);
2409 #endif
2410 if (end[-2] != '\r') /*(header line must end "\r\n")*/
2411 continue;
2412 end -= 2;
2413 uint32_t vlen = end - v;
2414 if (0 == vlen) continue;
2415 memset(&lsx, 0, sizeof(lsxpack_header_t));
2416 *(const char **)&lsx.buf = hdrs;
2417 lsx.name_offset = k - hdrs;
2418 lsx.name_len = klen;
2419 lsx.val_offset = v - hdrs;
2420 lsx.val_len = vlen;
2421 unsigned char * const dst_in = dst;
2422 dst = lshpack_enc_encode(encoder, dst, dst_end, &lsx);
2423 if (dst == dst_in) {
2424 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2425 return;
2426 }
2427 }
2428 uint32_t dlen = (uint32_t)((char *)dst - tb->ptr);
2429 h2_send_hpack(r, con, tb->ptr, dlen, flags);
2430 }
2431
2432
2433 static void
2434 h2_send_1xx_block (request_st * const r, connection * const con, const char * const hdrs, const uint32_t hlen)
2435 {
2436 h2_send_headers_block(r, con, hdrs, hlen, 0);
2437 }
2438
2439
2440 int
2441 h2_send_1xx (request_st * const r, connection * const con)
2442 {
2443 buffer * const b = chunk_buffer_acquire();
2444
2445 buffer_copy_string_len(b, CONST_STR_LEN(":status: "));
2446 buffer_append_int(b, r->http_status);
2447 for (uint32_t i = 0; i < r->resp_headers.used; ++i) {
2448 const data_string * const ds = (data_string *)r->resp_headers.data[i];
2449 const uint32_t klen = buffer_clen(&ds->key);
2450 const uint32_t vlen = buffer_clen(&ds->value);
2451 if (0 == klen || 0 == vlen) continue;
2452 buffer_append_str2(b, CONST_STR_LEN("\r\n"), ds->key.ptr, klen);
2453 buffer_append_str2(b, CONST_STR_LEN(": "), ds->value.ptr, vlen);
2454 }
2455 buffer_append_string_len(b, CONST_STR_LEN("\r\n\r\n"));
2456
2457 h2_send_1xx_block(r, con, BUF_PTR_LEN(b));
2458
2459 chunk_buffer_release(b);
2460 return 1; /* for http_response_send_1xx_cb */
2461 }
2462
2463
2464 void
2465 h2_send_100_continue (request_st * const r, connection * const con)
2466 {
2467 /* 100 Continue is small and will always fit in SETTINGS_MAX_FRAME_SIZE;
2468 * i.e. there will not be any CONTINUATION frames here */
2469
2470 /* XXX: need to update hpack dynamic table,
2471 * or else could hard-code header block fragment
2472 * { 0x48, 0x03, 0x31, 0x30, 0x30 }
2473 */
2474
2475 /* short header block, so reuse shared code used for trailers
2476 * rather than adding something specific for ls-hpack here */
2477
2478 h2_send_1xx_block(r, con, CONST_STR_LEN(":status: 100\r\n\r\n"));
2479 }
2480
2481
2482 static void
2483 h2_send_end_stream_data (request_st * const r, connection * const con);
2484
2485 __attribute_cold__
2486 __attribute_noinline__
2487 static void
2488 h2_send_end_stream_trailers (request_st * const r, connection * const con, const buffer * const trailers)
2489 {
2490 /*(trailers are merged into response headers if trailers are received before
2491 * sending response headers to client. However, if streaming response, then
2492 * trailers might need handling here)*/
2493
2494 /* parse and lowercase field-names in trailers */
2495 unsigned short hoff[8192]; /* max num header lines + 3; 16k on stack */
2496 hoff[0] = 1; /* number of lines */
2497 hoff[1] = 0; /* base offset for all lines */
2498 /*hoff[2] = ...;*/ /* offset from base for 2nd line */
2499 uint32_t rc = http_header_parse_hoff(BUF_PTR_LEN(trailers), hoff);
2500 if (0 == rc || rc > USHRT_MAX || hoff[0] >= sizeof(hoff)/sizeof(hoff[0])-1
2501 || 1 == hoff[0]) { /*(initial blank line)*/
2502 /* skip trailers if incomplete, too many fields, or too long (> 64k-1)*/
2503 h2_send_end_stream_data(r, con);
2504 return;
2505 }
2506
2507 char * const ptr = trailers->ptr;
2508 for (int i = 1; i < hoff[0]; ++i) {
2509 char *k = ptr + ((i > 1) ? hoff[i] : 0);
2510 if (*k == ':') {
2511 /*(pseudo-header should not appear in trailers)*/
2512 h2_send_end_stream_data(r, con);
2513 return;
2514 }
2515 const char * const colon = memchr(k, ':', ptr+hoff[i+1]-k);
2516 if (NULL == colon) continue;
2517 do {
2518 if (light_isupper(*k)) *k |= 0x20;
2519 } while (++k != colon);
2520 }
2521
2522 h2_send_headers_block(r, con, BUF_PTR_LEN(trailers), H2_FLAG_END_STREAM);
2523 }
2524
2525
2526 #if 0 /*(replaced by h2_send_headers())*/
2527 void
2528 h2_send_cqheaders (request_st * const r, connection * const con)
2529 {
2530 /*(assumes HTTP/1.1 response headers have been prepended as first chunk)
2531 *(future: if r->write_queue is bypassed for headers, adjust
2532 * r->write_queue bytes counts (bytes_in, bytes_out) with header len)*/
2533 /* note: expects field-names are lowercased (http_response_write_header())*/
2534 chunk * const c = r->write_queue.first;
2535 const uint32_t len = buffer_clen(c->mem) - (uint32_t)c->offset;
2536 uint32_t flags = (r->resp_body_finished && NULL == c->next)
2537 ? H2_FLAG_END_STREAM
2538 : 0;
2539 h2_send_headers_block(r, con, c->mem->ptr + c->offset, len, flags);
2540 chunkqueue_mark_written(&r->write_queue, len);
2541 }
2542 #endif
2543
2544
2545 #if 0
2546
2547 uint32_t
2548 h2_send_data (request_st * const r, connection * const con, const char *data, uint32_t dlen)
2549 {
2550 /* Note: dlen should be <= MAX_WRITE_LIMIT in order to share resources */
2551
2552 union {
2553 uint8_t c[12];
2554 uint32_t u[3]; /*(alignment)*/
2555 } dataframe = { { /*(big-endian numbers)*/
2556 0x00, 0x00, 0x00 /* padding for alignment; do not send */
2557 /* DATA */
2558 ,0x00, 0x00, 0x00 /* frame length (fill in below) */
2559 ,H2_FTYPE_DATA /* frame type */
2560 ,0x00 /* frame flags */
2561 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
2562 } };
2563
2564 dataframe.u[2] = htonl(r->h2id);
2565
2566 /* XXX: does not provide an optimization to send final set of data with
2567 * END_STREAM flag; see h2_send_end_stream_data() to end stream */
2568
2569 /* adjust stream and connection windows */
2570 /*assert(dlen <= INT32_MAX);*//* dlen should be <= MAX_WRITE_LIMIT */
2571 request_st * const h2r = &con->request;
2572 if (r->h2_swin < 0) return 0;
2573 if (h2r->h2_swin < 0) return 0;
2574 if ((int32_t)dlen > r->h2_swin) dlen = (uint32_t)r->h2_swin;
2575 if ((int32_t)dlen > h2r->h2_swin) dlen = (uint32_t)h2r->h2_swin;
2576 if (0 == dlen) return 0;
2577 r->h2_swin -= (int32_t)dlen;
2578 h2r->h2_swin -= (int32_t)dlen;
2579
2580 /* XXX: future: should have an interface which processes chunkqueue
2581 * and takes string refs to mmap FILE_CHUNK to avoid extra copying
2582 * since the result is likely to be consumed by TLS modules */
2583
2584 /*(approximate space needed for frames (header + payload)
2585 * with slight over-estimate of 16 bytes per frame header (> 9)
2586 * and minimum SETTINGS_MAX_FRAME_SIZE of 16k (could be larger)
2587 * (dlen >> 14)+1 is num 16k frames needed, multiplied by 16 bytes
2588 * per frame can be approximated with (dlen>>10) + 9)*/
2589 buffer * const b =
2590 chunkqueue_append_buffer_open_sz(con->write_queue, dlen + (dlen>>10) + 9);
2591 char * restrict ptr = b->ptr;
2592 h2con * const h2c = con->h2;
2593 const uint32_t fsize = h2c->s_max_frame_size;
2594 uint32_t sent = 0;
2595 do {
2596 const uint32_t len = dlen < fsize ? dlen : fsize;
2597 dataframe.c[3] = (len >> 16) & 0xFF; /*(off +3 to skip over align pad)*/
2598 dataframe.c[4] = (len >> 8) & 0xFF;
2599 dataframe.c[5] = (len ) & 0xFF;
2600 #if 0
2601 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align pad)*/
2602 (const char *)dataframe.c+3, sizeof(dataframe)-3);
2603 chunkqueue_append_mem(con->write_queue, data, len);
2604 #else
2605 memcpy(ptr, dataframe.c+3, sizeof(dataframe)-3);
2606 memcpy(ptr+sizeof(dataframe)-3, data, len);
2607 ptr += len + sizeof(dataframe)-3;
2608 #endif
2609 data += len;
2610 sent += len;
2611 dlen -= len;
2612 } while (dlen);
2613 buffer_truncate(b, (uint32_t)(ptr - b->ptr));
2614 chunkqueue_append_buffer_commit(con->write_queue);
2615 return sent;
2616 }
2617
2618 #endif
2619
2620
2621 uint32_t
2622 h2_send_cqdata (request_st * const r, connection * const con, chunkqueue * const cq, uint32_t dlen)
2623 {
2624 /* Note: dlen should be <= MAX_WRITE_LIMIT in order to share resources */
2625
2626 union {
2627 uint8_t c[12];
2628 uint32_t u[3]; /*(alignment)*/
2629 } dataframe = { { /*(big-endian numbers)*/
2630 0x00, 0x00, 0x00 /* padding for alignment; do not send */
2631 /* DATA */
2632 ,0x00, 0x00, 0x00 /* frame length (fill in below) */
2633 ,H2_FTYPE_DATA /* frame type */
2634 ,0x00 /* frame flags */
2635 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
2636 } };
2637
2638 dataframe.u[2] = htonl(r->h2id);
2639
2640 /* XXX: does not provide an optimization to send final set of data with
2641 * END_STREAM flag; see h2_send_end_stream_data() to end stream */
2642
2643 /* adjust stream and connection windows */
2644 /*assert(dlen <= INT32_MAX);*//* dlen should be <= MAX_WRITE_LIMIT */
2645 request_st * const h2r = &con->request;
2646 if (r->h2_swin < 0) return 0;
2647 if (h2r->h2_swin < 0) return 0;
2648 if ((int32_t)dlen > r->h2_swin) dlen = (uint32_t)r->h2_swin;
2649 if ((int32_t)dlen > h2r->h2_swin) dlen = (uint32_t)h2r->h2_swin;
2650 const off_t cqlen = chunkqueue_length(cq);
2651 if ((int32_t)dlen > cqlen) dlen = (uint32_t)cqlen;
2652 /*(note: must temporarily disable next line when running h2spec since
2653 * some h2spec tests expect 1-byte DATA frame, not a deferred response)*/
2654 else if (dlen < 2048 && cqlen >= 2048) return 0;
2655 if (0 == dlen) return 0;
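/* (worked example of the clamping above, hypothetical values: a caller
 * asking for dlen 16384 with stream window 10000, connection window 50000
 * and cqlen 8000 ends up sending 8000; if instead the stream window were
 * only 1000 with cqlen 8000, the 1000 < 2048 deferral above returns 0 and
 * waits for the peer to reopen the window) */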
2656
2657 /* XXX: future: should have an interface which processes chunkqueue
2658 * and takes string refs to mmap FILE_CHUNK to avoid extra copying
2659 * since the result is likely to be consumed by TLS modules */
2660
2661 h2con * const h2c = con->h2;
2662 const uint32_t fsize = h2c->s_max_frame_size;
2663 uint32_t sent = 0;
2664 do {
2665 if (cq->first->type == FILE_CHUNK) {
2666 /* combine frame header and data into single mem chunk buffer
2667 * and adjust to fit efficiently into power-2 sized buffer
2668 * (default and minimum HTTP/2 SETTINGS_MAX_FRAME_SIZE is 16k)
2669 * (default send buffer size in lighttpd TLS modules is 16k)
2670 * (read into memory since likely needed for HTTP/2 over TLS,
2671 * and to avoid many small calls to dup(), sendfile(), close())
2672 * (reading here into single chunk buffer is likely more efficient
2673 * than reference counting file chunks split and duplicated by
2674 * chunkqueue_steal() into 16k chunks, and alternating with 8k
2675 * chunk buffers containing 9 byte HTTP/2 header frame) */
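/* (worked sizing, using the 16384 default/minimum SETTINGS_MAX_FRAME_SIZE:
 * len == fsize-9 == 16375, so the 9-byte frame header plus payload exactly
 * fill the 9+len == 16384 buffer requested below; when dlen < fsize the
 * other branch is taken and the buffer is 9+dlen instead) */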
2676 const uint32_t len = dlen < fsize ? dlen : fsize-9;
2677 uint32_t blen = len;
2678 buffer * const b = /*(sizeof(dataframe)-3 == 9)*/
2679 chunkqueue_append_buffer_open_sz(con->write_queue, 9+len);
2680 char *data = b->ptr+9; /*(note: not including +1 to _open_sz)*/
2681 if (0 == chunkqueue_peek_data(cq, &data, &blen, r->conf.errh)
2682 && blen == len) {
2683 dlen -= len;
2684 sent += len;
2685 dataframe.c[3] = (len >> 16) & 0xFF; /*(+3 to skip align pad)*/
2686 dataframe.c[4] = (len >> 8) & 0xFF;
2687 dataframe.c[5] = (len ) & 0xFF;
2688 memcpy(b->ptr,(const char *)dataframe.c+3, sizeof(dataframe)-3);
2689 if (b->ptr+9 != data)
2690 memcpy(b->ptr+9, data, len);
2691 buffer_commit(b, 9+len);
2692 chunkqueue_append_buffer_commit(con->write_queue);
2693 chunkqueue_mark_written(cq, len);
2694 continue;
2695 }
2696
2697 /*(else remove empty last chunk and fall through to below)*/
2698 chunkqueue_remove_empty_chunks(cq);
2699 }
2700
2701 const uint32_t len = dlen < fsize ? dlen : fsize;
2702 dlen -= len;
2703 sent += len;
2704 dataframe.c[3] = (len >> 16) & 0xFF; /*(off +3 to skip over align pad)*/
2705 dataframe.c[4] = (len >> 8) & 0xFF;
2706 dataframe.c[5] = (len ) & 0xFF;
2707 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align pad)*/
2708 (const char *)dataframe.c+3, sizeof(dataframe)-3);
2709 chunkqueue_steal(con->write_queue, cq, (off_t)len);
2710 } while (dlen);
2711 r->h2_swin -= (int32_t)sent;
2712 h2r->h2_swin -= (int32_t)sent;
2713 return sent;
2714 }
2715
2716
2717 __attribute_noinline__
2718 static void
2719 h2_send_end_stream_data (request_st * const r, connection * const con)
2720 {
2721 if (r->h2state != H2_STATE_HALF_CLOSED_LOCAL) {
2722 union {
2723 uint8_t c[12];
2724 uint32_t u[3]; /*(alignment)*/
2725 } dataframe = { { /*(big-endian numbers)*/
2726 0x00, 0x00, 0x00 /* padding for alignment; do not send */
2727 /* DATA */
2728 ,0x00, 0x00, 0x00 /* frame length */
2729 ,H2_FTYPE_DATA /* frame type */
2730 ,H2_FLAG_END_STREAM /* frame flags */
2731 ,0x00, 0x00, 0x00, 0x00 /* stream identifier (fill in below) */
2732 } };
2733
2734 dataframe.u[2] = htonl(r->h2id);
2735 /*(ignore window updates when sending 0-length DATA frame with END_STREAM)*/
2736 chunkqueue_append_mem(con->write_queue, /*(+3 to skip over align pad)*/
2737 (const char *)dataframe.c+3, sizeof(dataframe)-3);
2738 }
2739
2740 if (r->h2state != H2_STATE_HALF_CLOSED_REMOTE) {
2741 /* set timestamp for comparison; not tracking individual stream ids */
2742 h2con * const h2c = con->h2;
2743 h2c->half_closed_ts = log_monotonic_secs;
2744 /* indicate to peer that no more DATA should be sent from peer */
2745 h2_send_rst_stream_id(r->h2id, con, H2_E_NO_ERROR);
2746 }
2747 r->h2state = H2_STATE_CLOSED;
2748 }
2749
2750
2751 void
2752 h2_send_end_stream (request_st * const r, connection * const con)
2753 {
2754 if (r->h2state == H2_STATE_CLOSED) return;
2755 if (r->state != CON_STATE_ERROR && r->resp_body_finished) {
2756 /* CON_STATE_RESPONSE_END */
2757 if (r->gw_dechunk && r->gw_dechunk->done
2758 && !buffer_is_unset(&r->gw_dechunk->b))
2759 h2_send_end_stream_trailers(r, con, &r->gw_dechunk->b);
2760 else
2761 h2_send_end_stream_data(r, con);
2762 }
2763 else { /* CON_STATE_ERROR */
2764 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2765 }
2766 }
2767
2768
2769 /*
2770 * (XXX: might move below to separate file)
2771 */
2772 #include "base64.h"
2773 #include "chunk.h"
2774 #include "plugins.h"
2775 #include "plugin_config.h"
2776 #include "reqpool.h"
2777
2778
2779 static request_st *
2780 h2_init_stream (request_st * const h2r, connection * const con)
2781 {
2782 h2con * const h2c = con->h2;
2783 ++con->request_count;
2784 force_assert(h2c->rused < sizeof(h2c->r)/sizeof(*h2c->r));
2785 /* initialize stream as subrequest (request_st *) */
2786 request_st * const r = request_acquire(con);
2787 /* XXX: TODO: assign default priority, etc.
2788 * Perhaps store stream id and priority in separate table */
2789 h2c->r[h2c->rused++] = r;
2790 r->h2_rwin = 65536; /* must keep in sync with h2_init_con() */
2791 r->h2_swin = h2c->s_initial_window_size;
2792 r->h2_rwin_fudge = 0;
2793 /* combine priority 'urgency' value and invert 'incremental' boolean
2794 * for easy (ascending) sorting by urgency and then incremental before
2795 * non-incremental */
2796 r->h2_prio = (3 << 1) | !0; /*(default urgency=3, incremental=0)*/
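/* (worked example of the encoding above: default urgency 3, incremental 0
 * gives (3<<1)|!0 == 7; an incremental stream at the same urgency would be
 * (3<<1)|!1 == 6; the .js/.css bump in h2_recv_headers() yields
 * (2<<1)|!0 == 5, so an ascending sort orders by urgency first and, within
 * an urgency, places incremental streams ahead of non-incremental ones) */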
2797 r->http_version = HTTP_VERSION_2;
2798
2799 /* copy config state from h2r */
2800 server * const srv = con->srv;
2801 const uint32_t used = srv->config_context->used;
2802 r->conditional_is_valid = h2r->conditional_is_valid;
2803 memcpy(r->cond_cache, h2r->cond_cache, used * sizeof(cond_cache_t));
2804 #ifdef HAVE_PCRE
2805 if (srv->config_captures)
2806 memcpy(r->cond_match, h2r->cond_match,
2807 srv->config_captures * sizeof(cond_match_t *));
2808 #endif
2809 /*(see request_config_reset() and request_reset_ex())*/
2810 r->server_name = h2r->server_name;
2811 memcpy(&r->conf, &h2r->conf, sizeof(request_config));
2812
2813 /* stream id must be assigned by caller */
2814 return r;
2815 }
2816
2817
2818 static void
2819 h2_release_stream (request_st * const r, connection * const con)
2820 {
2821 if (r->http_status) {
2822 /* (see comment in connection_handle_response_end_state()) */
2823 plugins_call_handle_request_done(r);
2824
2825 #if 0
2826 /* (fuzzy accounting for mod_accesslog, mod_rrdtool to avoid
2827 * double counting, but HTTP/2 framing and HPACK-encoded headers in
2828 * con->read_queue and con->write_queue are not equivalent to the
2829 * HPACK-decoded headers and request and response bodies in stream
2830 * r->read_queue and r->write_queue) */
2831 /* DISABLED since mismatches invalidate the relationship between
2832 * con->bytes_in and con->bytes_out */
2833 con->read_queue->bytes_in -= r->read_queue.bytes_in;
2834 con->write_queue->bytes_out -= r->write_queue.bytes_out;
2835 #else
2836 UNUSED(con);
2837 #endif
2838 }
2839
2840 request_release(r);
2841 }
2842
2843
2844 void
2845 h2_retire_stream (request_st *r, connection * const con)
2846 {
2847 if (r == NULL) return; /*(should not happen)*/
2848 h2con * const h2c = con->h2;
2849 request_st ** const ar = h2c->r;
2850 uint32_t i = 0, rused = h2c->rused;
2851 while (i < rused && ar[i] != r) ++i;
2852 if (i != rused) {
2853 /* swap with last element; might need to revisit if ordered by priority */
2854 /*if (i != --rused) ar[i] = ar[rused];*/
2855 /* shift elements; currently choosing to preserve order requested */
2856 if (i != --rused) memmove(ar+i, ar+i+1, (rused-i)*sizeof(*ar));
2857 h2c->r[(h2c->rused = rused)] = NULL;
2858 h2_release_stream(r, con);
2859 }
2860 /*else ... should not happen*/
2861 }
2862
2863
2864 void
2865 h2_retire_con (request_st * const h2r, connection * const con)
2866 {
2867 h2con * const h2c = con->h2;
2868 if (NULL == h2c) return;
2869
2870 if (h2r->state != CON_STATE_ERROR) { /*(CON_STATE_RESPONSE_END)*/
2871 h2_send_goaway(con, H2_E_NO_ERROR);
2872 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
2873 /*(unexpected if CON_STATE_RESPONSE_END)*/
2874 request_st * const r = h2c->r[i];
2875 h2_send_rst_stream(r, con, H2_E_INTERNAL_ERROR);
2876 h2_release_stream(r, con);
2877 }
2878 if (!chunkqueue_is_empty(con->write_queue)) {
2879 /* similar to connection_handle_write() but without error checks,
2880 * without MAX_WRITE_LIMIT, and without connection throttling */
2881 /*h2r->conf.bytes_per_second = 0;*/ /* disable rate limit */
2882 /*h2r->conf.global_bytes_per_second = 0;*/ /* disable rate limit */
2883 /*con->traffic_limit_reached = 0;*/
2884 chunkqueue * const cq = con->write_queue;
2885 const off_t len = chunkqueue_length(cq);
2886 off_t written = cq->bytes_out;
2887 con->network_write(con, cq, len);
2888 /*(optional accounting)*/
2889 written = cq->bytes_out - written;
2890 con->bytes_written_cur_second += written;
2891 if (h2r->conf.global_bytes_per_second_cnt_ptr)
2892 *(h2r->conf.global_bytes_per_second_cnt_ptr) += written;
2893 }
2894 }
2895 else { /* CON_STATE_ERROR */
2896 for (uint32_t i = 0, rused = h2c->rused; i < rused; ++i) {
2897 request_st * const r = h2c->r[i];
2898 h2_release_stream(r, con);
2899 }
2900 /* XXX: perhaps attempt to send GOAWAY? Not when CON_STATE_ERROR */
2901 }
2902
2903 con->h2 = NULL;
2904
2905 /* future: might keep a pool of reusable (h2con *) */
2906 lshpack_enc_cleanup(&h2c->encoder);
2907 lshpack_dec_cleanup(&h2c->decoder);
2908 free(h2c);
2909 }
2910
2911
2912 static void
2913 h2_con_upgrade_h2c (request_st * const h2r, const buffer * const http2_settings)
2914 {
2915 /* HTTP/1.1 101 Switching Protocols
2916 * Connection: Upgrade
2917 * Upgrade: h2c
2918 */
2919 #if 1
2920 static const char switch_proto[] = "HTTP/1.1 101 Switching Protocols\r\n"
2921 "Connection: Upgrade\r\n"
2922 "Upgrade: h2c\r\n\r\n";
2923 chunkqueue_append_mem(&h2r->write_queue,
2924 CONST_STR_LEN(switch_proto));
2925 h2r->resp_header_len = sizeof(switch_proto)-1;
2926 #else
2927 h2r->http_status = 101;
2928 http_header_response_set(h2r, HTTP_HEADER_UPGRADE, CONST_STR_LEN("Upgrade"),
2929 CONST_STR_LEN("h2c"));
2930 http_response_write_header(h2r);
2931 http_response_reset(h2r);
2932 h2r->http_status = 0;
2933 #endif
2934
2935 connection * const con = h2r->con;
2936 h2_init_con(h2r, con, http2_settings);
2937 if (con->h2->sent_goaway) return;
2938
2939 con->h2->h2_cid = 1; /* stream id 1 is assigned to h2c upgrade */
2940
2941 /* copy request state from &con->request to subrequest r
2942 * XXX: would be nice if there were a cleaner way to do this
2943 * (This is fragile and must be kept in-sync with request_st in request.h)*/
2944
2945 request_st * const r = h2_init_stream(h2r, con);
2946 /*(undo double-count; already incremented in CON_STATE_REQUEST_START)*/
2947 --con->request_count;
2948 r->state = CON_STATE_REQUEST_END;
2949 r->http_status = 0;
2950 r->http_method = h2r->http_method;
2951 r->h2state = H2_STATE_HALF_CLOSED_REMOTE;
2952 r->h2id = 1;
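 /* (RFC7540 3.2: the HTTP/1.1 request sent prior to upgrade is assigned
  *  stream identifier 1 and is implicitly half-closed from the client toward
  *  the server, hence H2_STATE_HALF_CLOSED_REMOTE above; stream 1 is then
  *  used for the response) */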
2953 r->rqst_htags = h2r->rqst_htags;
2954 h2r->rqst_htags = 0;
2955 r->rqst_header_len = h2r->rqst_header_len;
2956 h2r->rqst_header_len = 0;
2957 r->rqst_headers = h2r->rqst_headers; /* copy struct */
2958 memset(&h2r->rqst_headers, 0, sizeof(array));
2959 r->uri = h2r->uri; /* copy struct */
2960 #if 0
2961 r->physical = h2r->physical; /* copy struct */
2962 r->env = h2r->env; /* copy struct */
2963 #endif
2964 memset(&h2r->rqst_headers, 0, sizeof(array));
2965 memset(&h2r->uri, 0, sizeof(request_uri));
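 /* (note: the copy-struct-then-zero-the-source pattern above hands ownership
  *  of any heap allocations inside rqst_headers and uri from h2r to r, so
  *  resetting or freeing h2r later will not touch the transferred memory) */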
2966 #if 0
2967 memset(&h2r->physical, 0, sizeof(physical));
2968 memset(&h2r->env, 0, sizeof(array));
2969 #endif
2970 #if 0 /* expect empty request body */
2971 r->reqbody_length = h2r->reqbody_length; /* currently always 0 */
2972 r->te_chunked = h2r->te_chunked; /* must be 0 */
2973 r->resp_body_scratchpad = h2r->resp_body_scratchpad; /*(not started yet)*/
2974 swap(&r->reqbody_queue,&h2r->reqbody_queue);/*currently always empty queue*/
2975 #endif
2976 r->http_host = h2r->http_host;
2977 h2r->http_host = NULL;
2978 #if 0
2979 r->server_name = h2r->server_name;
2980 h2r->server_name = &h2r->uri.authority; /*(is not null)*/
2981 #endif
2982 r->target = h2r->target; /* copy struct */
2983 r->target_orig = h2r->target_orig; /* copy struct */
2984 #if 0
2985 r->pathinfo = h2r->pathinfo; /* copy struct */
2986 r->server_name_buf = h2r->server_name_buf; /* copy struct */
2987 #endif
2988 memset(&h2r->target, 0, sizeof(buffer));
2989 memset(&h2r->target_orig, 0, sizeof(buffer));
2990 #if 0
2991 memset(&h2r->pathinfo, 0, sizeof(buffer));
2992 memset(&h2r->server_name_buf, 0, sizeof(buffer));
2993 #endif
2994 #if 0
2995 /* skip copying response structures, other state not yet modified in h2r */
2996 /* r write_queue and read_queue are intentionally separate from h2r */
2997 /* r->gw_dechunk must be NULL for HTTP/2 */
2998 /* bytes_written_ckpt and bytes_read_ckpt are for HTTP/1.1 */
2999 /* error handlers have not yet been set */
3000 #endif
3001 #if 0
3002 r->loops_per_request = h2r->loops_per_request;
3003 r->async_callback = h2r->async_callback;
3004 #endif
3005 r->keep_alive = h2r->keep_alive;
3006 r->tmp_buf = h2r->tmp_buf; /* shared; same as srv->tmp_buf */
3007 r->start_hp = h2r->start_hp; /* copy struct */
3008
3009 /* Note: HTTP/1.1 101 Switching Protocols is not immediately written to
3010 * the network here. As this is called from cleartext Upgrade: h2c,
3011 * we choose to delay sending the status until the beginning of the response
3012 * to the HTTP/1.1 request which included Upgrade: h2c */
3013 }
3014
3015
3016 int
3017 h2_check_con_upgrade_h2c (request_st * const r)
3018 {
3019 /* RFC7540 3.2 Starting HTTP/2 for "http" URIs */
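 /* (for reference, RFC7540 3.2 shows the upgrade request handled here:
  *    GET / HTTP/1.1
  *    Host: server.example.com
  *    Connection: Upgrade, HTTP2-Settings
  *    Upgrade: h2c
  *    HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
  *  the HTTP2-Settings value is base64url-decoded below and passed to
  *  h2_con_upgrade_h2c() as the client's initial SETTINGS payload) */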
3020
3021 buffer *http_connection, *http2_settings;
3022 buffer *upgrade = http_header_request_get(r, HTTP_HEADER_UPGRADE,
3023 CONST_STR_LEN("Upgrade"));
3024 if (NULL == upgrade) return 0;
3025 http_connection = http_header_request_get(r, HTTP_HEADER_CONNECTION,
3026 CONST_STR_LEN("Connection"));
3027 if (NULL == http_connection) {
3028 http_header_request_unset(r, HTTP_HEADER_UPGRADE,
3029 CONST_STR_LEN("Upgrade"));
3030 return 0;
3031 }
3032 if (r->http_version != HTTP_VERSION_1_1) {
3033 http_header_request_unset(r, HTTP_HEADER_UPGRADE,
3034 CONST_STR_LEN("Upgrade"));
3035 http_header_remove_token(http_connection, CONST_STR_LEN("Upgrade"));
3036 return 0;
3037 }
3038
3039 if (!http_header_str_contains_token(BUF_PTR_LEN(upgrade),
3040 CONST_STR_LEN("h2c")))
3041 return 0;
3042
3043 http2_settings = http_header_request_get(r, HTTP_HEADER_HTTP2_SETTINGS,
3044 CONST_STR_LEN("HTTP2-Settings"));
3045 if (NULL != http2_settings) {
3046 if (0 == r->reqbody_length) {
3047 buffer * const b = r->tmp_buf;
3048 buffer_clear(b);
3049 if (r->conf.h2proto > 1/*(must be enabled with server.h2c feature)*/
3050 && !r->con->is_ssl_sock /*(disallow h2c over TLS socket)*/
3051 &&
3052 http_header_str_contains_token(BUF_PTR_LEN(http_connection),
3053 CONST_STR_LEN("HTTP2-Settings"))
3054 && buffer_append_base64_decode(b, BUF_PTR_LEN(http2_settings),
3055 BASE64_URL)) {
3056 h2_con_upgrade_h2c(r, b);
3057 r->http_version = HTTP_VERSION_2;
3058 } /* else ignore if invalid base64 */
3059 }
3060 else {
3061 /* ignore Upgrade: h2c if request body present since we do not
3062 * (currently) handle request body before transition to h2c */
3063 /* RFC7540 3.2 Requests that contain a payload body MUST be sent
3064 * in their entirety before the client can send HTTP/2 frames. */
3065 }
3066 http_header_request_unset(r, HTTP_HEADER_HTTP2_SETTINGS,
3067 CONST_STR_LEN("HTTP2-Settings"));
3068 http_header_remove_token(http_connection, CONST_STR_LEN("HTTP2-Settings"));
3069 } /* else ignore Upgrade: h2c; HTTP2-Settings required for Upgrade: h2c */
3070 http_header_request_unset(r, HTTP_HEADER_UPGRADE,
3071 CONST_STR_LEN("Upgrade"));
3072 http_header_remove_token(http_connection, CONST_STR_LEN("Upgrade"));
3073 return (r->http_version == HTTP_VERSION_2);
3074 }
3075