/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet
 * forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock
 * and thus multiple lookups and modifications can be done at the same time
 * as long as they are in different bucket rows.  If a request for insertion
 * of a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry
 * because of bucket limit memory constraints.
 */

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache which is
 * being followed here.
 */
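
/*
 * The cache is per-VNET and can be inspected and flushed at run time
 * through the net.inet.tcp.hostcache sysctl tree defined further down,
 * for example:
 *
 *      sysctl net.inet.tcp.hostcache.list
 *      sysctl net.inet.tcp.hostcache.purgenow=1
 */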

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE          512
#define TCP_HOSTCACHE_BUCKETLIMIT       30
#define TCP_HOSTCACHE_EXPIRE            60*60   /* one hour */
#define TCP_HOSTCACHE_PRUNE             5*60    /* every 5 minutes */

VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define V_tcp_hostcache         VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define V_tcp_hc_callout        VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define V_tcp_use_hostcache     VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");
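
/*
 * The cachelimit, hashsize and bucketlimit knobs above are read-only
 * tunables, so they can only be set at boot time, e.g. from
 * /boot/loader.conf (illustrative values; hashsize must be a power of 2):
 *
 *      net.inet.tcp.hostcache.hashsize="1024"
 *      net.inet.tcp.hostcache.bucketlimit="60"
 */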

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_histo, "A",
    "Print a histogram of hostcache hashbucket utilization");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

/* Use jenkins_hash32(), as in other parts of the tcp stack */
#define HOSTCACHE_HASH(ip) \
        (jenkins_hash32((uint32_t *)(ip), 1, V_tcp_hostcache.hashsalt) & \
         V_tcp_hostcache.hashmask)

#define HOSTCACHE_HASH6(ip6) \
        (jenkins_hash32((uint32_t *)&((ip6)->s6_addr32[0]), 4, \
         V_tcp_hostcache.hashsalt) & \
         V_tcp_hostcache.hashmask)
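
/*
 * The hash is seeded with a per-VNET random salt.  A purge run with the
 * purge (or purgenow) knob set to 2 picks a fresh salt, so subsequent
 * entries hash to different buckets.
 */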

#define THC_LOCK(lp)            mtx_lock(lp)
#define THC_UNLOCK(lp)          mtx_unlock(lp)

void
tcp_hc_init(void)
{
        u_int cache_limit;
        int i;

        /*
         * Initialize hostcache structures.
         */
        atomic_store_int(&V_tcp_hostcache.cache_count, 0);
        V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
        V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
        V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
        V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
        V_tcp_hostcache.hashsalt = arc4random();

        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
            &V_tcp_hostcache.hashsize);
        if (!powerof2(V_tcp_hostcache.hashsize)) {
                printf("WARNING: hostcache hash size is not a power of 2.\n");
                V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
        }
        V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
            &V_tcp_hostcache.bucket_limit);

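        /*
         * The overall cache limit defaults to hashsize * bucket_limit and
         * is clamped to that product even if the cachelimit tunable asks
         * for more, since the per-bucket limit would be hit first anyway.
         */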
        cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
        V_tcp_hostcache.cache_limit = cache_limit;
        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
            &V_tcp_hostcache.cache_limit);
        if (V_tcp_hostcache.cache_limit > cache_limit)
                V_tcp_hostcache.cache_limit = cache_limit;

        /*
         * Allocate the hash table.
         */
        V_tcp_hostcache.hashbase = (struct hc_head *)
            malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
            M_HOSTCACHE, M_WAITOK | M_ZERO);

        /*
         * Initialize the hash buckets.
         */
        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
                V_tcp_hostcache.hashbase[i].hch_length = 0;
                mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
                    NULL, MTX_DEF);
        }

        /*
         * Allocate the hostcache entries.
         */
        V_tcp_hostcache.zone =
            uma_zcreate("hostcache", sizeof(struct hc_metrics),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

        /*
         * Set up periodic cache cleanup.
         */
        callout_init(&V_tcp_hc_callout, 1);
        callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
            tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
        int i;

        callout_drain(&V_tcp_hc_callout);

        /* Purge all hc entries. */
        tcp_hc_purge_internal(1);

        /* Free the uma zone and the allocated hash table. */
        uma_zdestroy(V_tcp_hostcache.zone);

        for (i = 0; i < V_tcp_hostcache.hashsize; i++)
                mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
        free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        if (!V_tcp_use_hostcache)
                return NULL;

        KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &V_tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we don't
         * find an entry, otherwise the caller has to unlock after he is
         * done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * Iterate through entries in bucket row looking for a match.
         */
        TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
                if (inc->inc_flags & INC_ISIPV6) {
                        /* XXX: check ip6_zoneid */
                        if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
                            sizeof(inc->inc6_faddr)) == 0)
                                return hc_entry;
                } else {
                        if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
                            sizeof(inc->inc_faddr)) == 0)
                                return hc_entry;
                }
        }

        /*
         * We were unsuccessful and didn't find anything.
         */
        THC_UNLOCK(&hc_head->hch_mtx);
        return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        if (!V_tcp_use_hostcache)
                return NULL;

        KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &V_tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we don't
         * find an entry, otherwise the caller has to unlock after he is
         * done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * If the bucket limit is reached, reuse the least recently used
         * element.
         */
        if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
            atomic_load_int(&V_tcp_hostcache.cache_count) >=
            V_tcp_hostcache.cache_limit) {
                hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
                /*
                 * At first we were dropping the last element, just to
                 * reacquire it in the next two lines again, which isn't very
                 * efficient.  Instead just reuse the least recently used
                 * element.  We may drop something that is still "in-use" but
                 * we can be "lossy".
                 * Just give up if this bucket row is empty and we don't have
                 * anything to replace.
                 */
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
                TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
                KASSERT(V_tcp_hostcache.hashbase[hash].hch_length > 0 &&
                    V_tcp_hostcache.hashbase[hash].hch_length <=
                    V_tcp_hostcache.bucket_limit,
                    ("tcp_hostcache: bucket length range violated at %u: %u",
                    hash, V_tcp_hostcache.hashbase[hash].hch_length));
                V_tcp_hostcache.hashbase[hash].hch_length--;
                atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
                TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
                uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
        } else {
                /*
                 * Allocate a new entry, or balk if not possible.
                 */
                hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
        }

        /*
         * Initialize basic information of hostcache entry.
         */
        bzero(hc_entry, sizeof(*hc_entry));
        if (inc->inc_flags & INC_ISIPV6) {
                hc_entry->ip6 = inc->inc6_faddr;
                hc_entry->ip6_zoneid = inc->inc6_zoneid;
        } else
                hc_entry->ip4 = inc->inc_faddr;
        hc_entry->rmx_head = hc_head;
        hc_entry->rmx_expire = V_tcp_hostcache.expire;

        /*
         * Put it upfront.
         */
        TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
        V_tcp_hostcache.hashbase[hash].hch_length++;
        KASSERT(V_tcp_hostcache.hashbase[hash].hch_length <
            V_tcp_hostcache.bucket_limit,
            ("tcp_hostcache: bucket length too high at %u: %u",
            hash, V_tcp_hostcache.hashbase[hash].hch_length));
        atomic_add_int(&V_tcp_hostcache.cache_count, 1);
        TCPSTAT_INC(tcps_hc_added);

        return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Zero-fills the structure if no entry
 * was found; individual metrics that were never measured remain zero as
 * well.
 */
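/*
 * A sketch of typical caller usage (the caller and the min_protoh
 * adjustment below are illustrative, not prescribed by this file):
 *
 *      struct hc_metrics_lite metrics;
 *
 *      tcp_hc_get(&inp->inp_inc, &metrics);
 *      if (metrics.rmx_mtu != 0)
 *              mss = min(mss, metrics.rmx_mtu - min_protoh);
 *
 * Callers must treat a zero field as "no cached information".
 */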
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
        struct hc_metrics *hc_entry;

        if (!V_tcp_use_hostcache) {
                bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
                return;
        }

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object.
         */
        if (hc_entry == NULL) {
                bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
                return;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
        hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
        hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
        hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
        hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
        hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
        hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or value is not
 * set.
 */
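/*
 * A sketch of a typical path-MTU consumer (hypothetical caller and
 * variable names):
 *
 *      uint32_t mtu = tcp_hc_getmtu(&inp->inp_inc);
 *
 *      if (mtu != 0)
 *              maxmtu = min(maxmtu, mtu);
 */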
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
        struct hc_metrics *hc_entry;
        uint32_t mtu;

        if (!V_tcp_use_hostcache)
                return 0;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                return 0;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        mtu = hc_entry->rmx_mtu;
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
        return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
        struct hc_metrics *hc_entry;

        if (!V_tcp_use_hostcache)
                return;

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object, try to insert a new one.
         */
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        hc_entry->rmx_mtu = mtu;

        /*
         * Put it upfront so we find it faster next time.
         */
        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
        struct hc_metrics *hc_entry;

        if (!V_tcp_use_hostcache)
                return;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

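        /*
         * Each metric below is smoothed by averaging the cached value with
         * the new sample 50/50; the very first sample is taken as-is.
         */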
        if (hcml->rmx_rtt != 0) {
                if (hc_entry->rmx_rtt == 0)
                        hc_entry->rmx_rtt = hcml->rmx_rtt;
                else
                        hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
                            (uint64_t)hcml->rmx_rtt) / 2;
                TCPSTAT_INC(tcps_cachedrtt);
        }
        if (hcml->rmx_rttvar != 0) {
                if (hc_entry->rmx_rttvar == 0)
                        hc_entry->rmx_rttvar = hcml->rmx_rttvar;
                else
                        hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
                            (uint64_t)hcml->rmx_rttvar) / 2;
                TCPSTAT_INC(tcps_cachedrttvar);
        }
        if (hcml->rmx_ssthresh != 0) {
                if (hc_entry->rmx_ssthresh == 0)
                        hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
                else
                        hc_entry->rmx_ssthresh =
                            (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
                TCPSTAT_INC(tcps_cachedssthresh);
        }
        if (hcml->rmx_cwnd != 0) {
                if (hc_entry->rmx_cwnd == 0)
                        hc_entry->rmx_cwnd = hcml->rmx_cwnd;
                else
                        hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
                            (uint64_t)hcml->rmx_cwnd) / 2;
                /* TCPSTAT_INC(tcps_cachedcwnd); */
        }
        if (hcml->rmx_sendpipe != 0) {
                if (hc_entry->rmx_sendpipe == 0)
                        hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
                else
                        hc_entry->rmx_sendpipe =
                            ((uint64_t)hc_entry->rmx_sendpipe +
                            (uint64_t)hcml->rmx_sendpipe) / 2;
                /* TCPSTAT_INC(tcps_cachedsendpipe); */
        }
        if (hcml->rmx_recvpipe != 0) {
                if (hc_entry->rmx_recvpipe == 0)
                        hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
                else
                        hc_entry->rmx_recvpipe =
                            ((uint64_t)hc_entry->rmx_recvpipe +
                            (uint64_t)hcml->rmx_recvpipe) / 2;
                /* TCPSTAT_INC(tcps_cachedrecvpipe); */
        }

        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
        const int linesize = 128;
        struct sbuf sb;
        int i, error, len;
        struct hc_metrics *hc_entry;
        char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
        char ip6buf[INET6_ADDRSTRLEN];
#endif

        if (jailed_without_vnet(curthread->td_ucred) != 0)
                return (EPERM);

        /* Optimize the buffer length query by sbin/sysctl. */
        if (req->oldptr == NULL) {
                len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
                    linesize;
                return (SYSCTL_OUT(req, NULL, len));
        }

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0) {
                return(error);
        }

        /* Use a buffer sized for one full bucket. */
        sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
            linesize, req);

        sbuf_printf(&sb,
            "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
            "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
        sbuf_drain(&sb);

#define msec(u) (((u) + 500) / 1000)
        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
                    rmx_q) {
                        sbuf_printf(&sb,
                            "%-15s %5u %8u %6lums %6lums %8u %8u %8u %4lu "
                            "%4lu %4i\n",
                            hc_entry->ip4.s_addr ?
                                inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
                                ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
                                "IPv6?",
#endif
                            hc_entry->rmx_mtu,
                            hc_entry->rmx_ssthresh,
                            msec((u_long)hc_entry->rmx_rtt *
                                (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
                            msec((u_long)hc_entry->rmx_rttvar *
                                (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
                            hc_entry->rmx_cwnd,
                            hc_entry->rmx_sendpipe,
                            hc_entry->rmx_recvpipe,
                            hc_entry->rmx_hits,
                            hc_entry->rmx_updates,
                            hc_entry->rmx_expire);
                }
                THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
                sbuf_drain(&sb);
        }
#undef msec
        error = sbuf_finish(&sb);
        sbuf_delete(&sb);
        return(error);
}

/*
 * Sysctl function: prints a histogram of the hostcache hashbucket
 * utilization.
 */
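/*
 * Each output line gives a chain length and the number of buckets that
 * currently hold that many entries.  The output can be fetched with, for
 * example:
 *
 *      sysctl net.inet.tcp.hostcache.histo
 */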
static int
sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS)
{
        const int linesize = 50;
        struct sbuf sb;
        int i, error;
        int *histo;
        u_int hch_length;

        if (jailed_without_vnet(curthread->td_ucred) != 0)
                return (EPERM);

        histo = (int *)malloc(sizeof(int) * (V_tcp_hostcache.bucket_limit + 1),
            M_TEMP, M_NOWAIT|M_ZERO);
        if (histo == NULL)
                return(ENOMEM);

        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                hch_length = V_tcp_hostcache.hashbase[i].hch_length;
                KASSERT(hch_length <= V_tcp_hostcache.bucket_limit,
                    ("tcp_hostcache: bucket limit exceeded at %u: %u",
                    i, hch_length));
                histo[hch_length]++;
        }

        /* Use a buffer for 16 lines. */
        sbuf_new_for_sysctl(&sb, NULL, 16 * linesize, req);

        sbuf_printf(&sb, "\nLength\tCount\n");
        for (i = 0; i <= V_tcp_hostcache.bucket_limit; i++) {
                sbuf_printf(&sb, "%u\t%u\n", i, histo[i]);
        }
        error = sbuf_finish(&sb);
        sbuf_delete(&sb);
        free(histo, M_TEMP);
        return(error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
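/*
 * Each run walks every bucket and either frees an entry (when "all" is set
 * or the entry's rmx_expire has counted down to zero) or ages it by the
 * prune interval.
 */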
static void
tcp_hc_purge_internal(int all)
{
        struct hc_metrics *hc_entry, *hc_next;
        int i;

        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH_SAFE(hc_entry,
                    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
                        KASSERT(V_tcp_hostcache.hashbase[i].hch_length > 0 &&
                            V_tcp_hostcache.hashbase[i].hch_length <=
                            V_tcp_hostcache.bucket_limit,
                            ("tcp_hostcache: bucket length out of range at %u: %u",
                            i, V_tcp_hostcache.hashbase[i].hch_length));
                        if (all || hc_entry->rmx_expire <= 0) {
                                TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
                                    hc_entry, rmx_q);
                                uma_zfree(V_tcp_hostcache.zone, hc_entry);
                                V_tcp_hostcache.hashbase[i].hch_length--;
                                atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
                        } else
                                hc_entry->rmx_expire -= V_tcp_hostcache.prune;
                }
                THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
        }
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
        CURVNET_SET((struct vnet *) arg);
        int all = 0;

        if (V_tcp_hostcache.purgeall) {
                if (V_tcp_hostcache.purgeall == 2)
                        V_tcp_hostcache.hashsalt = arc4random();
                all = 1;
                V_tcp_hostcache.purgeall = 0;
        }

        tcp_hc_purge_internal(all);

        callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
            tcp_hc_purge, arg);
        CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in hostcache immediately.
 */
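/*
 * For example:
 *
 *      sysctl net.inet.tcp.hostcache.purgenow=1
 *
 * A value of 2 additionally re-seeds the hash salt, as with the "purge"
 * knob above.
 */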
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = 0;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);

        if (val == 2)
                V_tcp_hostcache.hashsalt = arc4random();
        tcp_hc_purge_internal(1);

        callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
            tcp_hc_purge, curvnet);

        return (0);
}