/f-stack/app/redis-5.0.5/deps/jemalloc/test/unit/
stats.c
      7  sz = sizeof(size_t);  in TEST_BEGIN()
     33  size_t sz;  in TEST_BEGIN() local
     42  sz = sizeof(size_t);  in TEST_BEGIN()
     46  sz = sizeof(uint64_t);  in TEST_BEGIN()
     71  size_t sz;  in TEST_BEGIN() local
     93  sz = sizeof(size_t);  in TEST_BEGIN()
    145  size_t sz, allocated;  in TEST_BEGIN() local
    160  sz = sizeof(size_t);  in TEST_BEGIN()
    200  sz = sizeof(size_t);  in TEST_BEGIN()
    268  sz = sizeof(size_t);  in TEST_BEGIN()
    [all …]

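All of the stats.c hits follow jemalloc's mallctl() read protocol: sz is loaded with the size of the output variable and passed by address, so the call knows the buffer size and can report the bytes actually written. A minimal sketch of that pattern (my own example, assuming an unprefixed jemalloc build with statistics enabled; link with -ljemalloc):

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Refresh the stats epoch so the counters below are current. */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        /* Read a size_t-valued statistic: sz is both in and out. */
        size_t allocated;
        sz = sizeof(size_t);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
            printf("stats.allocated = %zu bytes\n", allocated);
        return 0;
    }
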
mallctl.c
      7  size_t sz;  in TEST_BEGIN() local
     23  sz = sizeof(epoch)-1;  in TEST_BEGIN()
     26  sz = sizeof(epoch)+1;  in TEST_BEGIN()
     44  size_t sz;  in TEST_BEGIN() local
     67  sz = sizeof(epoch)-1;  in TEST_BEGIN()
     71  sz = sizeof(epoch)+1;  in TEST_BEGIN()
    245  size_t sz, psz, qsz;  in TEST_BEGIN() local
    252  sz = sizeof(unsigned);  in TEST_BEGIN()
    340  sz = sizeof(unsigned);  in TEST_BEGIN()
    370  size_t sz;  in TEST_BEGIN() local
    [all …]

zero.c
      6  size_t sz_prev, sz, i;  in test_zero() local
     13  for (sz = sallocx(s, 0); sz <= sz_max;  in test_zero()
     14  sz_prev = sz, sz = sallocx(s, 0)) {  in test_zero()
     24  for (i = sz_prev; i < sz; i++) {  in test_zero()
     27  i, sz);  in test_zero()
     31  if (xallocx(s, sz+1, 0, 0) == sz) {  in test_zero()
     32  s = (uint8_t *)rallocx(s, sz+1, 0);  in test_zero()

junk.c
     58  size_t sz_prev, sz, i;  in test_junk() local
     73  for (sz = sallocx(s, 0); sz <= sz_max;  in test_junk()
     74  sz_prev = sz, sz = sallocx(s, 0)) {  in test_junk()
     84  for (i = sz_prev; i < sz; i++) {  in test_junk()
     88  "junk-filled", i, sz);  in test_junk()
     93  if (xallocx(s, sz+1, 0, 0) == sz) {  in test_junk()
     96  t = (uint8_t *)rallocx(s, sz+1, 0);  in test_junk()
     99  assert_zu_ge(sallocx(t, 0), sz+1,  in test_junk()
    106  "junk-filled", sz);  in test_junk()
    115  "Expected region of size %zu to be junk-filled", sz);  in test_junk()

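zero.c and junk.c share the size-class walk visible in these hits: sallocx() reports the real usable size of an allocation, xallocx() probes whether it can grow in place to sz+1, and rallocx() moves it when in-place growth fails. A condensed sketch of that loop (the bound sz_max and the omitted fill assertions are mine):

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        const size_t sz_max = 64 * 1024;   /* arbitrary stopping point */
        uint8_t *s = mallocx(1, 0);
        for (size_t sz = sallocx(s, 0); sz <= sz_max; sz = sallocx(s, 0)) {
            printf("visiting size class: %zu bytes\n", sz);
            /* xallocx() returning the old size means it could not grow
             * in place, so move the allocation with rallocx(). */
            if (xallocx(s, sz + 1, 0, 0) == sz) {
                uint8_t *t = rallocx(s, sz + 1, 0);
                if (t == NULL)
                    break;                 /* out of memory: stop */
                s = t;
            }
        }
        dallocx(s, 0);
        return 0;
    }
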
decay.c
     15  size_t sz = sizeof(bool);  in check_background_thread_enabled() local
    186  size_t sz, large0;  in TEST_BEGIN() local
    189  sz = sizeof(size_t);  in TEST_BEGIN()
    289  sz);  in TEST_BEGIN()
    297  sz);  in TEST_BEGIN()
    304  sz);  in TEST_BEGIN()
    311  sz);  in TEST_BEGIN()
    320  "(sz=%zu)", sz);  in TEST_BEGIN()
    352  "(sz=%zu)", sz);  in TEST_BEGIN()
    490  size_t sz, large0;  in TEST_BEGIN() local
    [all …]

pack.c
     21  size_t sz;  in binind_compute() local
     24  sz = sizeof(nbins);  in binind_compute()
     25  assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,  in binind_compute()
     37  sz = sizeof(size);  in binind_compute()
     38  assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,  in binind_compute()
     52  size_t sz;  in nregs_per_run_compute() local
     60  sz = sizeof(nregs);  in nregs_per_run_compute()
     61  assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,  in nregs_per_run_compute()
     69  size_t sz;  in arenas_create_mallctl() local
     71  sz = sizeof(arena_ind);  in arenas_create_mallctl()
    [all …]

extent_quantize.c
      5  size_t sz, extent_size;  in TEST_BEGIN() local
     14  sz = sizeof(unsigned);  in TEST_BEGIN()
     15  assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,  in TEST_BEGIN()
     22  sz = sizeof(size_t);  in TEST_BEGIN()
     23  assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,  in TEST_BEGIN()
     40  size_t sz, extent_size_prev, ceil_prev;  in TEST_BEGIN() local
     49  sz = sizeof(bool);  in TEST_BEGIN()
     51  &sz, NULL, 0), 0, "Unexpected mallctl failure");  in TEST_BEGIN()
     53  sz = sizeof(unsigned);  in TEST_BEGIN()
     63  sz = sizeof(size_t);  in TEST_BEGIN()
    [all …]

background_thread.c
      8  size_t sz = sizeof(bool);  in test_switch_background_thread_ctl() local
     11  assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,  in test_switch_background_thread_ctl()
     12  &e1, sz), 0, "Unexpected mallctl() failure");  in test_switch_background_thread_ctl()
     27  size_t sz = sizeof(bool);  in test_repeat_background_thread_ctl() local
     30  assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,  in test_repeat_background_thread_ctl()
     31  &e1, sz), 0, "Unexpected mallctl() failure");  in test_repeat_background_thread_ctl()
     47  size_t sz = sizeof(bool);  in TEST_BEGIN() local
     49  assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,  in TEST_BEGIN()
     51  assert_d_eq(mallctl("background_thread", (void *)&e1, &sz,  in TEST_BEGIN()

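The background_thread.c hits use mallctl()'s combined read+write form: the previous value comes back through the third argument while the new value is installed from the fourth, in a single call. A short sketch of that form (assuming a jemalloc build with background-thread support):

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        bool new_val = true, old_val;
        size_t sz = sizeof(bool);
        /* Enable background threads and capture the prior setting. */
        if (mallctl("background_thread", &old_val, &sz, &new_val, sz) == 0)
            printf("background_thread: %d -> %d\n", old_val, new_val);
        return 0;
    }
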
/f-stack/app/redis-5.0.5/deps/jemalloc/test/integration/
thread_tcache_enabled.c
      6  size_t sz = sizeof(bool);  in thd_start() local
     12  assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,  in thd_start()
     13  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     19  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     24  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     29  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     34  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     40  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     46  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
     52  (void *)&e1, sz), 0, "Unexpected mallctl() error");  in thd_start()
    [all …]

xallocx.c
     23  size_t sz, tsz;  in TEST_BEGIN() local
     38  size_t sz, tsz;  in TEST_BEGIN() local
     44  tsz = xallocx(p, sz, sz-42, 0);  in TEST_BEGIN()
     53  size_t sz, tsz;  in TEST_BEGIN() local
    321  sz = szmax;  in test_zero()
    325  sz);  in test_zero()
    336  sz = szmin;  in test_zero()
    337  if (xallocx(p, sz, 0, flags) != sz) {  in test_zero()
    344  for (sz = szmin; sz < szmax; sz = nsz) {  in test_zero()
    352  assert_false(validate_fill(p, 0x00, sz, nsz-sz),  in test_zero()
    [all …]

mallocx.c
    118  size_t sz;  in TEST_BEGIN() local
    120  for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {  in TEST_BEGIN()
    123  nsz = nallocx(sz, 0);  in TEST_BEGIN()
    125  p = mallocx(sz, 0);  in TEST_BEGIN()
    133  p = mallocx(sz, 0);  in TEST_BEGIN()
    176  for (sz = 1;  in TEST_BEGIN()
    177  sz < 3 * alignment && sz < (1U << 31);  in TEST_BEGIN()
    184  "size=%zu (%#zx)", alignment, sz, sz);  in TEST_BEGIN()
    189  "size=%zu (%#zx)", alignment, sz, sz);  in TEST_BEGIN()
    191  assert_zu_ge(rsz, sz,  in TEST_BEGIN()
    [all …]

allocated.c
     17  size_t sz, usize;  in thd_start() local
     19  sz = sizeof(a0);  in thd_start()
     27  sz = sizeof(ap0);  in thd_start()
     39  sz = sizeof(d0);  in thd_start()
     47  sz = sizeof(dp0);  in thd_start()
     63  sz = sizeof(a1);  in thd_start()
     64  mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);  in thd_start()
     65  sz = sizeof(ap1);  in thd_start()
     66  mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);  in thd_start()
     80  sz = sizeof(d1);  in thd_start()
    [all …]

sdallocx.c
     13  size_t nsz, sz, alignment, total;  in TEST_BEGIN() local
     25  for (sz = 1;  in TEST_BEGIN()
     26  sz < 3 * alignment && sz < (1U << 31);  in TEST_BEGIN()
     27  sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {  in TEST_BEGIN()
     29  nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |  in TEST_BEGIN()
     31  ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |  in TEST_BEGIN()
     40  sdallocx(ps[i], sz,  in TEST_BEGIN()

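sdallocx() is sized deallocation: the caller hands back the size and flags of the original request so the allocator can skip its size lookup; the pair must describe the same size class that mallocx() used. A minimal calling-convention sketch (not taken from the test above):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t sz = 100;
        int flags = MALLOCX_ALIGN(64);
        void *p = mallocx(sz, flags);
        if (p != NULL)
            sdallocx(p, sz, flags);   /* same sz and flags as mallocx() */
        return 0;
    }
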
extent.c
      8  size_t sz = sizeof(bool);  in check_background_thread_enabled() local
      9  int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);  in check_background_thread_enabled()
     20  size_t large0, large1, large2, sz;  in test_extent_body() local
     29  sz = sizeof(size_t);  in test_extent_body()
    104  size_t old_size, new_size, sz;  in test_manual_hook_auto_arena() local
    111  sz = sizeof(unsigned);  in test_manual_hook_auto_arena()
    141  size_t old_size, new_size, sz;  in test_manual_hook_body() local
    148  sz = sizeof(unsigned);  in test_manual_hook_body()
    227  size_t new_size, sz;  in TEST_BEGIN() local
    232  sz = sizeof(unsigned);  in TEST_BEGIN()
    [all …]

/f-stack/freebsd/contrib/openzfs/include/os/linux/spl/sys/
vmem.h
     90  #define vmem_alloc(sz, fl)   spl_vmem_alloc((sz), (fl), __func__, __LINE__)  argument
     91  #define vmem_zalloc(sz, fl)  spl_vmem_zalloc((sz), (fl), __func__, __LINE__)  argument
     92  #define vmem_free(ptr, sz)   spl_vmem_free((ptr), (sz))  argument
     94  extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line);
     95  extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line);
     96  extern void spl_vmem_free(const void *ptr, size_t sz);

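The vmem_alloc()/vmem_zalloc() wrappers capture __func__ and __LINE__ at each call site so the SPL can attribute allocations to their callers; those are kernel-only symbols. The same caller-tracking macro technique in portable user-space C, with hypothetical tracked_alloc()/tracked_free() stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    static void *tracked_alloc(size_t sz, const char *func, int line) {
        printf("alloc %zu bytes at %s:%d\n", sz, func, line);
        return malloc(sz);
    }

    static void tracked_free(void *ptr, size_t sz) {
        (void)sz;    /* a real tracker would check the recorded size */
        free(ptr);
    }

    /* Expand at the call site so __func__/__LINE__ name the caller. */
    #define my_alloc(sz)      tracked_alloc((sz), __func__, __LINE__)
    #define my_free(ptr, sz)  tracked_free((ptr), (sz))

    int main(void) {
        void *p = my_alloc(128);
        my_free(p, 128);
        return 0;
    }
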
/f-stack/freebsd/contrib/openzfs/module/unicode/
u8_textprep.c
    341  int sz;  in u8_validate() local
    696  if (sz == 1 || sz > 4)  in combining_class()
   1383  int sz;  in collect_a_seq() local
   1455  s += sz;  in collect_a_seq()
   1456  sz = i;  in collect_a_seq()
   1496  i += sz;  in collect_a_seq()
   1611  l = sz;  in collect_a_seq()
   1633  sz = i;  in collect_a_seq()
   1912  int sz;  in u8_textprep_str() local
   1989  sz = 1;  in u8_textprep_str()
    [all …]

/f-stack/dpdk/lib/librte_graph/
graph_populate.c
     20  size_t sz;  in graph_fp_mem_calc_size() local
     23  sz = sizeof(struct rte_graph);  in graph_fp_mem_calc_size()
     28  sz = RTE_ALIGN(sz, val);  in graph_fp_mem_calc_size()
     29  graph->cir_start = sz;  in graph_fp_mem_calc_size()
     31  sz += val;  in graph_fp_mem_calc_size()
     34  sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);  in graph_fp_mem_calc_size()
     35  graph->nodes_start = sz;  in graph_fp_mem_calc_size()
     38  sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);  in graph_fp_mem_calc_size()
     44  graph->mem_sz = sz;  in graph_fp_mem_calc_size()
     45  return sz;  in graph_fp_mem_calc_size()
    [all …]

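graph_fp_mem_calc_size() packs several sub-regions into one allocation by accumulating sz, aligning it, and recording each region's starting offset before adding its length. A self-contained sketch of that layout technique (ALIGN, CACHE_LINE, and the sizes are stand-ins for RTE_ALIGN, RTE_CACHE_LINE_SIZE, and the real structures):

    #include <stdio.h>
    #include <stddef.h>

    /* Round x up to a power-of-two boundary a, like RTE_ALIGN. */
    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))
    #define CACHE_LINE   64

    struct layout { size_t cir_start, nodes_start, mem_sz; };

    static struct layout calc_size(size_t hdr, size_t cir, size_t nodes) {
        struct layout l;
        size_t sz = hdr;                 /* header comes first */
        sz = ALIGN(sz, CACHE_LINE);
        l.cir_start = sz;                /* circular buffer region */
        sz += cir;
        sz = ALIGN(sz, CACHE_LINE);
        l.nodes_start = sz;              /* node array region */
        sz += nodes;
        l.mem_sz = sz;                   /* total bytes to allocate */
        return l;
    }

    int main(void) {
        struct layout l = calc_size(40, 1024, 4096);
        printf("cir@%zu nodes@%zu total=%zu\n",
            l.cir_start, l.nodes_start, l.mem_sz);
        return 0;
    }
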
/f-stack/app/redis-5.0.5/src/
quicklist.c
    143  node->sz = 0;  in quicklistCreateNode()
    192  lzf->sz + MIN_COMPRESS_IMPROVE >= node->sz) {  in __quicklistCompressNode()
    418  #define sizeMeetsSafetyLimit(sz) ((sz) <= SIZE_SAFETY_LIMIT)  argument
    427  if (sz < 254)  in _quicklistNodeAllowInsert()
    433  if (sz < 64)  in _quicklistNodeAllowInsert()
    441  unsigned int new_sz = node->sz + sz + ziplist_overhead;  in _quicklistNodeAllowInsert()
    460  unsigned int merge_sz = a->sz + b->sz - 11;  in _quicklistNodeAllowMerge()
   1207  node->sz = current->sz;  in quicklistDup()
   1338  if (sz)  in quicklistPopCustom()
   1339  *sz = 0;  in quicklistPopCustom()
    [all …]

quicklist.h
     48  unsigned int sz;  /* ziplist size in bytes */  member
     63  unsigned int sz;  /* LZF size in bytes */  member
     96  unsigned int sz;  member
    124  int quicklistPushHead(quicklist *quicklist, void *value, const size_t sz);
    125  int quicklistPushTail(quicklist *quicklist, void *value, const size_t sz);
    126  void quicklistPush(quicklist *quicklist, void *value, const size_t sz,
    134  void *value, const size_t sz);
    136  void *value, const size_t sz);
    139  int sz);
    153  unsigned int *sz, long long *sval,
    [all …]

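Each push/insert entry point takes the payload pointer together with its byte length sz. A minimal usage sketch (quicklistCreate() and quicklistRelease() come from the same header but are not among the hits; this only builds inside the Redis source tree):

    #include <string.h>
    #include "quicklist.h"

    int main(void) {
        quicklist *ql = quicklistCreate();
        char buf[] = "hello";
        quicklistPushHead(ql, buf, strlen(buf));  /* sz = payload bytes */
        quicklistPushTail(ql, buf, strlen(buf));
        quicklistRelease(ql);
        return 0;
    }
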
/f-stack/dpdk/app/test/
test_pmd_ring_perf.c
    100  unsigned sz, i = 0;  in test_bulk_enqueue_dequeue() local
    103  for (sz = 0; sz < RTE_DIM(bulk_sizes); sz++) {  in test_bulk_enqueue_dequeue()
    107  bulk_sizes[sz], NULL);  in test_bulk_enqueue_dequeue()
    109  bulk_sizes[sz], NULL);  in test_bulk_enqueue_dequeue()
    116  rte_eth_tx_burst(ring_ethdev_port, 0, burst, bulk_sizes[sz]);  in test_bulk_enqueue_dequeue()
    117  rte_eth_rx_burst(ring_ethdev_port, 0, burst, bulk_sizes[sz]);  in test_bulk_enqueue_dequeue()
    123  (iterations * bulk_sizes[sz]));  in test_bulk_enqueue_dequeue()
    125  (iterations * bulk_sizes[sz]));  in test_bulk_enqueue_dequeue()
    127  printf("ring bulk enq/deq (size: %u) : %.1F\n", bulk_sizes[sz],  in test_bulk_enqueue_dequeue()
    129  printf("ethdev bulk enq/deq (size:%u): %.1F\n", bulk_sizes[sz],  in test_bulk_enqueue_dequeue()

test_trace_perf.c
    133  memset(data, 0, sz);  in WORKER_DEFINE()
    152  size_t sz;  in test_trace_perf() local
    162  sz = sizeof(struct test_data);  in test_trace_perf()
    163  sz += nb_workers * sizeof(struct lcore_data);  in test_trace_perf()
    165  data = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);  in test_trace_perf()
    171  run_test("void", worker_fn_GENERIC_VOID, data, sz);  in test_trace_perf()
    172  run_test("u64", worker_fn_GENERIC_U64, data, sz);  in test_trace_perf()
    173  run_test("int", worker_fn_GENERIC_INT, data, sz);  in test_trace_perf()
    174  run_test("float", worker_fn_GENERIC_FLOAT, data, sz);  in test_trace_perf()
    176  run_test("string", worker_fn_GENERIC_STR, data, sz);  in test_trace_perf()
    [all …]

/f-stack/freebsd/net/route/
nhgrp_ctl.c
    193  size_t sz;  in get_nhgrp_alloc_size() local
    197  sz += sizeof(struct nhgrp_priv);  in get_nhgrp_alloc_size()
    199  return (sz);  in get_nhgrp_alloc_size()
    519  size_t sz;  in append_nhops() local
    529  if (sz <= sizeof(storage))  in append_nhops()
    593  size_t sz;  in nhgrp_get_filtered_group() local
    600  if (sz <= sizeof(storage))  in nhgrp_get_filtered_group()
    709  size_t sz;  in dump_nhgrp_entry() local
    723  bzero(buffer, sz);  in dump_nhgrp_entry()
    726  rtm->rtm_msglen = sz;  in dump_nhgrp_entry()
    [all …]

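append_nhops() and nhgrp_get_filtered_group() both apply the `sz <= sizeof(storage)` test: a small on-stack buffer covers the common case, and the heap is used only for larger requests. A generic user-space sketch of the idiom (the per-item size is illustrative):

    #include <stdlib.h>
    #include <string.h>

    static void process(size_t nitems) {
        char storage[128];                /* fast path: stack buffer */
        char *buf = storage;
        size_t sz = nitems * 8;           /* hypothetical item size */

        if (sz > sizeof(storage)) {
            buf = malloc(sz);             /* slow path: heap buffer */
            if (buf == NULL)
                return;
        }
        memset(buf, 0, sz);
        /* ... fill and consume buf ... */
        if (buf != storage)
            free(buf);
    }

    int main(void) {
        process(4);        /* fits in storage */
        process(1000);     /* spills to the heap */
        return 0;
    }
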
/f-stack/app/redis-5.0.5/deps/jemalloc/msvc/test_threads/
test_threads.cpp
     22  size_t sz = sizeof(narenas);  in test_threads() local
     23  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);  in test_threads()
     51  const int sz = sizes[x];  in test_threads() local
     52  ptrsz[j] = sz;  in test_threads()
     53  ptrs[j] = (uint8_t*)je_malloc(sz);  in test_threads()
     55  … printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);  in test_threads()
     58  for (int k = 0; k < sz; k++)  in test_threads()
     62  for (int k = 0, sz = ptrsz[j]; k < sz; k++)  in test_threads() local

/f-stack/freebsd/kern/
subr_physmem.c
    314  physmem_hardware_region(uint64_t pa, uint64_t sz)  in physmem_hardware_region() argument
    323  if (sz <= PAGE_SIZE)  in physmem_hardware_region()
    326  sz -= PAGE_SIZE;  in physmem_hardware_region()
    343  if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {  in physmem_hardware_region()
    344  sz = MAX_PHYS_ADDR - pa + 1;  in physmem_hardware_region()
    345  if (sz <= 1024 * 1024)  in physmem_hardware_region()
    347  sz -= 1024 * 1024;  in physmem_hardware_region()
    356  sz = trunc_page(sz - adj);  in physmem_hardware_region()
    358  if (sz > 0 && hwcnt < nitems(hwregions))  in physmem_hardware_region()
    376  sz = round_page(sz + adj);  in physmem_exclude_region()
    [all …]

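physmem_hardware_region() clips a RAM region inward to whole pages: the start address is rounded up, the bytes lost to that adjustment (adj) are subtracted, and the remaining length is truncated down. A user-space sketch with stand-in trunc_page()/round_page() macros (PAGE_SIZE assumed to be 4096; sz must exceed adj):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE     4096u
    #define trunc_page(x) ((x) & ~(uint64_t)(PAGE_SIZE - 1))
    #define round_page(x) (((x) + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1))

    int main(void) {
        uint64_t pa = 0x10000123, sz = 0x5000;
        uint64_t adj = round_page(pa) - pa;  /* bytes lost at the front */
        pa = round_page(pa);                 /* align start upward */
        sz = trunc_page(sz - adj);           /* shrink length downward */
        printf("usable region: %#llx .. %#llx\n",
            (unsigned long long)pa, (unsigned long long)(pa + sz));
        return 0;
    }
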
/f-stack/dpdk/lib/librte_eal/freebsd/
eal_timer.c
     31  size_t sz;  in get_tsc_freq() local
     35  sz = sizeof(tmp);  in get_tsc_freq()
     38  if (sysctlbyname("kern.timecounter.smp_tsc", &tmp, &sz, NULL, 0))  in get_tsc_freq()
     45  if (sysctlbyname("kern.timecounter.invariant_tsc", &tmp, &sz, NULL, 0))  in get_tsc_freq()
     50  sz = sizeof(tsc_hz);  in get_tsc_freq()
     51  if (sysctlbyname("machdep.tsc_freq", &tsc_hz, &sz, NULL, 0)) {  in get_tsc_freq()

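get_tsc_freq() relies on sysctlbyname(), where sz carries the buffer size in and the byte count written out, mirroring the mallctl() convention seen earlier. A FreeBSD-only sketch of the final lookup:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int main(void) {
        uint64_t tsc_hz;
        size_t sz = sizeof(tsc_hz);
        /* As in get_tsc_freq(): query the invariant TSC frequency. */
        if (sysctlbyname("machdep.tsc_freq", &tsc_hz, &sz, NULL, 0) == 0)
            printf("TSC frequency: %llu Hz\n", (unsigned long long)tsc_hz);
        else
            perror("sysctlbyname");
        return 0;
    }
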