/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef FSTACK
#include <stdint.h>
#endif

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#ifndef FSTACK
#include <kvm.h>
#endif
#include <nlist.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

#ifndef FSTACK
static int memstat_malloc_zone_count;
static int memstat_malloc_zone_sizes[32];

static int memstat_malloc_zone_init(void);
static int memstat_malloc_zone_init_kvm(kvm_t *kvm);

static struct nlist namelist[] = {
#define	X_KMEMSTATISTICS	0
	{ .n_name = "_kmemstatistics" },
#define	X_KMEMZONES	1
	{ .n_name = "_kmemzones" },
#define	X_NUMZONES	2
	{ .n_name = "_numzones" },
#define	X_VM_MALLOC_ZONE_COUNT	3
	{ .n_name = "_vm_malloc_zone_count" },
#define	X_MP_MAXCPUS	4
	{ .n_name = "_mp_maxcpus" },
	{ .n_name = "" },
};
#endif

/*
 * Extract malloc(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
	struct malloc_type_stream_header *mtshp;
	struct malloc_type_header *mthp;
	struct malloc_type_stats *mtsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of malloc types so that
	 * we can guess an initial buffer size.  We loop until we succeed or
	 * really fail.  Note that the value of maxcpus we query using sysctl
	 * is not the version we use when processing the real data -- that is
	 * read from the header.
	 */
retry:
	size = sizeof(maxcpus);
	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxcpus)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

#ifndef FSTACK
	if (memstat_malloc_zone_init() == -1) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
#endif

	/* One stream header, then one header plus maxcpus stats per type. */
	size = sizeof(*mtshp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
	    maxcpus);

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return; we should perhaps
		 * bound the number of loops.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*mtshp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	mtshp = (struct malloc_type_stream_header *)p;
	p += sizeof(*mtshp);

	if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = mtshp->mtsh_maxcpus;
	count = mtshp->mtsh_count;
	for (i = 0; i < count; i++) {
		mthp = (struct malloc_type_header *)p;
		p += sizeof(*mthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    mthp->mth_name, maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on a current node.
		 */
		_memstat_mt_reset_stats(mtp, maxcpus);

		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
			p += sizeof(*mtsp);

			/*
			 * Summarize raw statistics across CPUs into
			 * coalesced statistics.
			 */
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			/*
			 * Copies of per-CPU statistics.
			 */
			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		/*
		 * Derived cross-CPU statistics.
		 */
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(buffer);

	return (0);
}

#ifndef FSTACK
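/*
 * Read size bytes from the kernel image at kvm_pointer plus offset into the
 * caller's buffer, failing on a short read.
 */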
static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
	    size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

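/*
 * Copy a NUL-terminated string out of the kernel image one byte at a time,
 * NUL-terminating (and so truncating) it if it fills the buffer.
 */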
static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
	ssize_t ret;
	int i;

	for (i = 0; i < buflen; i++) {
		ret = kvm_read(kvm, __DECONST(unsigned long, kvm_pointer) +
		    i, &(buffer[i]), sizeof(char));
		if (ret < 0)
			return (MEMSTAT_ERROR_KVM);
		if ((size_t)ret != sizeof(char))
			return (MEMSTAT_ERROR_KVM_SHORTREAD);
		if (buffer[i] == '\0')
			return (0);
	}
	/* Truncate. */
	buffer[i-1] = '\0';
	return (0);
}

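/*
 * Read size bytes from the address resolved for a namelist symbol, plus an
 * optional offset.
 */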
static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

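/*
 * Read a single CPU's copy of a per-CPU (zpcpu) allocation from the kernel
 * image.
 */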
static int
kread_zpcpu(kvm_t *kvm, u_long base, void *buf, size_t size, int cpu)
{
	ssize_t ret;

	ret = kvm_read_zpcpu(kvm, base, buf, size, cpu);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

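/*
 * As with memstat_sysctl_malloc(), but extract malloc(9) statistics from a
 * kernel memory image via libkvm, walking the kmemstatistics list directly.
 */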
int
memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
{
	struct memory_type *mtp;
	void *kmemstatistics;
	int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret;
	char name[MEMTYPE_MAXNAME];
	struct malloc_type_stats mts;
	struct malloc_type_internal *mtip;
	struct malloc_type type, *typep;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}

	if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
	    namelist[X_KMEMSTATISTICS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}

	ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
	    sizeof(mp_maxcpus), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
	    sizeof(kmemstatistics), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	ret = memstat_malloc_zone_init_kvm(kvm);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	mp_ncpus = kvm_getncpus(kvm);

	for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
		ret = kread(kvm, typep, &type, sizeof(type), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
		    MEMTYPE_MAXNAME);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		if (type.ks_version != M_VERSION) {
			warnx("type %s with unsupported version %lu; skipped",
			    name, type.ks_version);
			continue;
		}

		/*
		 * Since our compile-time value for MAXCPU may differ from the
		 * kernel's, we populate our own array.
		 */
		mtip = &type.ks_mti;

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    name, mp_maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * This logic is replicated from kern_malloc.c, and should
		 * be kept in sync.
		 */
		_memstat_mt_reset_stats(mtp, mp_maxcpus);
		for (j = 0; j < mp_ncpus; j++) {
			ret = kread_zpcpu(kvm, (u_long)mtip->mti_stats, &mts,
			    sizeof(mts), j);
			if (ret != 0) {
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			mtp->mt_memalloced += mts.mts_memalloced;
			mtp->mt_memfreed += mts.mts_memfreed;
			mtp->mt_numallocs += mts.mts_numallocs;
			mtp->mt_numfrees += mts.mts_numfrees;
			mtp->mt_sizemask |= mts.mts_size;

			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mts.mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mts.mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mts.mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mts.mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mts.mts_size;
		}
		for (; j < mp_maxcpus; j++) {
			bzero(&mtp->mt_percpu_alloc[j],
			    sizeof(mtp->mt_percpu_alloc[0]));
		}

		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	return (0);
}

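/*
 * Query the kernel's count of malloc(9) zones and their allocation sizes
 * via the vm.malloc.zone_count and vm.malloc.zone_sizes sysctls.
 */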
static int
memstat_malloc_zone_init(void)
{
	size_t size;

	size = sizeof(memstat_malloc_zone_count);
	if (sysctlbyname("vm.malloc.zone_count", &memstat_malloc_zone_count,
	    &size, NULL, 0) < 0) {
		return (-1);
	}

	if (memstat_malloc_zone_count > (int)nitems(memstat_malloc_zone_sizes)) {
		return (-1);
	}

	size = sizeof(memstat_malloc_zone_sizes);
	if (sysctlbyname("vm.malloc.zone_sizes", &memstat_malloc_zone_sizes,
	    &size, NULL, 0) < 0) {
		return (-1);
	}

	return (0);
}

/*
 * Copied from kern_malloc.c
 *
 * kz_zone is an array sized at compilation time; its size is exported in
 * "numzones".  Below we only need to iterate over kz_size.
 */
struct memstat_kmemzone {
	int kz_size;
	const char *kz_name;
	void *kz_zone[1];
};

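/*
 * As above, but recover the zone count and sizes from a kernel image by
 * reading vm_malloc_zone_count, numzones, and the kmemzones array.
 */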
static int
memstat_malloc_zone_init_kvm(kvm_t *kvm)
{
	struct memstat_kmemzone *kmemzones, *kz;
	int numzones, objsize, allocsize, ret;
	int i;

	ret = kread_symbol(kvm, X_VM_MALLOC_ZONE_COUNT,
	    &memstat_malloc_zone_count, sizeof(memstat_malloc_zone_count), 0);
	if (ret != 0) {
		return (ret);
	}

	/* Mirror the bounds check in memstat_malloc_zone_init(). */
	if (memstat_malloc_zone_count > (int)nitems(memstat_malloc_zone_sizes)) {
		return (MEMSTAT_ERROR_DATAERROR);
	}

	ret = kread_symbol(kvm, X_NUMZONES, &numzones, sizeof(numzones), 0);
	if (ret != 0) {
		return (ret);
	}

	objsize = __offsetof(struct memstat_kmemzone, kz_zone) +
	    sizeof(void *) * numzones;

	allocsize = objsize * memstat_malloc_zone_count;
	kmemzones = malloc(allocsize);
	if (kmemzones == NULL) {
		return (MEMSTAT_ERROR_NOMEMORY);
	}
	ret = kread_symbol(kvm, X_KMEMZONES, kmemzones, allocsize, 0);
	if (ret != 0) {
		free(kmemzones);
		return (ret);
	}

	/* Only memstat_malloc_zone_count entries were read above. */
	kz = kmemzones;
	for (i = 0; i < memstat_malloc_zone_count; i++) {
		memstat_malloc_zone_sizes[i] = kz->kz_size;
		kz = (struct memstat_kmemzone *)((char *)kz + objsize);
	}

	free(kmemzones);
	return (0);
}

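/*
 * Return the number of malloc(9) zones recorded at initialization time.
 */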
size_t
memstat_malloc_zone_get_count(void)
{

	return (memstat_malloc_zone_count);
}

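/*
 * Return the allocation size of malloc(9) zone n, or (size_t)-1 if n is out
 * of range.
 */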
size_t
memstat_malloc_zone_get_size(size_t n)
{

	if (n >= nitems(memstat_malloc_zone_sizes)) {
		return (-1);
	}

	return (memstat_malloc_zone_sizes[n]);
}

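/*
 * Report whether memory type mtp has ever allocated from malloc(9) zone n,
 * based on the type's size mask.
 */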
int
memstat_malloc_zone_used(const struct memory_type *mtp, size_t n)
{

	if (memstat_get_sizemask(mtp) & (1 << n))
		return (1);

	return (0);
}
#endif