/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet
 * forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock
 * and thus multiple lookups and modifications can be done at the same time
 * as long as they are in different bucket rows.  If a request for insertion
 * of a new record can't be satisfied, it is simply dropped; lookups that
 * find nothing return an all-zero metrics structure.  Nobody and nothing
 * outside of tcp_hostcache.c will ever point directly to any entry in the
 * tcp_hostcache.  All communication is done in an object-oriented way and
 * only functions of tcp_hostcache will manipulate hostcache entries.
 * Otherwise, we are unable to achieve good behaviour in concurrent access
 * situations.  Since tcp_hostcache is only caching information, there are
 * no fatal consequences if we either can't satisfy any particular request
 * or have to drop/overwrite an existing entry because of bucket limit
 * memory constraints.
 */
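
/*
 * Illustrative sketch only (not part of the implementation): a typical
 * consumer pulls the cached metrics while a connection is being set up and
 * feeds measured values back when it is torn down.  tcp_hc_get() and
 * tcp_hc_update() below are the real entry points; the "example_*" helpers
 * are hypothetical placeholders for the caller's own logic.
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	(zeroed if nothing is cached)
 *	if (metrics.rmx_rtt != 0)
 *		example_seed_rtt_estimate(tp, &metrics);
 *
 *	... and at connection teardown ...
 *
 *	example_collect_measured_metrics(tp, &metrics);
 *	tcp_hc_update(&inp->inp_inc, &metrics);
 */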

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define	TCP_HOSTCACHE_HASHSIZE		512
#define	TCP_HOSTCACHE_BUCKETLIMIT	30
#define	TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define	TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
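
/*
 * The hash size must be a power of two; tcp_hc_init() falls back to the
 * default if the tunable is not.  The overall entry limit defaults to
 * hashsize * bucketlimit (512 * 30 = 15360 entries).  All of these can be
 * overridden through the net.inet.tcp.hostcache.* tunables and sysctls
 * declared below.
 */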

VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define	V_tcp_use_hostcache	VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
164 "Expire all entires on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

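/*
 * Fold the (network byte order) IPv4 address onto itself with two right
 * shifts so that more of its bits influence the bucket index, then mask
 * with hashmask (hashsize - 1, a power of two).
 */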
#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) &	\
	 V_tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)				\
	(((ip6)->s6_addr32[0] ^				\
	  (ip6)->s6_addr32[1] ^				\
	  (ip6)->s6_addr32[2] ^				\
	  (ip6)->s6_addr32[3]) &			\
	 V_tcp_hostcache.hashmask)

#define	THC_LOCK(lp)		mtx_lock(lp)
#define	THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

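	/*
	 * The overall limit defaults to hashsize * bucket_limit; the
	 * cachelimit tunable may lower it, but is clamped so it can never
	 * exceed that product.
	 */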
	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
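	/* With the zone capped, uma_zalloc(M_NOWAIT) returns NULL once
	   cache_limit entries have been allocated. */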

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
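	/* Remaining lifetime in seconds; aged and reclaimed by tcp_hc_purge(). */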
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  The structure is zeroed when no entry is
 * found or a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or value is not
 * set.
 */
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return 0;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return 0;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

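	/*
	 * For each metric a zero sample means "no data" and is skipped;
	 * otherwise the cached value is smoothed as the arithmetic mean of
	 * the old value and the new sample.
	 */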
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
			    (uint64_t)hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
			    (uint64_t)hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
			    (uint64_t)hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    ((uint64_t)hc_entry->rmx_sendpipe +
			    (uint64_t)hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    ((uint64_t)hc_entry->rmx_recvpipe +
			    (uint64_t)hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
	    SBUF_INCLUDENUL);

644 sbuf_printf(&sb,
645 "\nIP address MTU SSTRESH RTT RTTVAR "
646 " CWND SENDPIPE RECVPIPE HITS UPD EXP\n");
647
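	/*
	 * rmx_rtt and rmx_rttvar are stored in scaled ticks; the factor
	 * RTM_RTTUNIT / (hz * TCP_RTT_SCALE) converts them to microseconds,
	 * which msec() then rounds to milliseconds for display.
	 */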
#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u %4lu "
			    "%4lu %4i\n",
			    hc_entry->ip4.s_addr ?
			        inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec((u_long)hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
#undef msec
	error = sbuf_finish(&sb);
	if (error == 0)
		error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
	sbuf_delete(&sb);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

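	/*
	 * Walk every bucket row; free entries whose lifetime has run out
	 * (or all of them if "all" is set), otherwise age them by the
	 * prune interval.
	 */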
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}