#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) je_##n
#  include "../jemalloc.h"
#endif
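/*
 * Illustrative sketch: JEMALLOC_N() expands an internal symbol name into the
 * namespace selected above, so internal symbols never collide with the
 * application's own names.  Assuming the default je_ prefix:
 *
 *   JEMALLOC_N(arena_init) -> jet_arena_init   (JEMALLOC_JET test builds)
 *   JEMALLOC_N(arena_init) -> je_arena_init    (regular builds)
 */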
#ifdef JEMALLOC_LAZY_LOCK
#ifdef JEMALLOC_PROF_LIBGCC
#ifdef JEMALLOC_PROF_LIBUNWIND
#ifdef JEMALLOC_MAPS_COALESCE
#ifdef JEMALLOC_MUNMAP
#ifdef JEMALLOC_TCACHE
#ifdef JEMALLOC_UTRACE
#ifdef JEMALLOC_VALGRIND
#ifdef JEMALLOC_XMALLOC
#ifdef JEMALLOC_IVSALLOC
#ifdef JEMALLOC_CACHE_OBLIVIOUS
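/*
 * Each of the JEMALLOC_* feature tests above is folded into a compile-time
 * boolean so the rest of the code can write plain `if (config_foo)` checks
 * instead of scattering #ifdef.  A sketch of the pattern, shown here for
 * config_lazy_lock (the other config_* constants declared further below
 * follow the same shape):
 *
 *   static const bool config_lazy_lock =
 *   #ifdef JEMALLOC_LAZY_LOCK
 *       true
 *   #else
 *       false
 *   #endif
 *       ;
 */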
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#define JEMALLOC_H_TYPES

#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
    ((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
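/*
 * Sketch of how the decode macros above invert the public MALLOCX_*() encode
 * macros from jemalloc.h (flag values chosen arbitrarily for illustration):
 *
 *   int flags = MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO |
 *       MALLOCX_TCACHE(7) | MALLOCX_ARENA(3);
 *
 *   MALLOCX_ALIGN_GET(flags)  == 16      (1 << 4)
 *   MALLOCX_ZERO_GET(flags)   == true
 *   MALLOCX_TCACHE_GET(flags) == 7
 *   MALLOCX_ARENA_GET(flags)  == 3
 *
 * MALLOCX_ALIGN_GET() additionally masks with (SIZE_T_MAX-1) so that a flags
 * word with no alignment specified (lg_align == 0) decodes to 0 rather than 1.
 */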
#define TINY_MIN (1U << LG_TINY_MIN)

# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# define LG_QUANTUM 3
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 3
# define LG_QUANTUM 3
# define LG_QUANTUM 3
# define LG_QUANTUM 4
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# define LG_QUANTUM 4
# ifndef LG_QUANTUM
#  error "Unknown minimum alignment for architecture; specify via " \
         "--with-lg-quantum"
# endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)
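/*
 * The *_CEILING() macros all use the same power-of-two rounding idiom: adding
 * (2^k - 1) and then masking off the low k bits rounds up to the next multiple
 * of 2^k.  A worked example, assuming LG_QUANTUM == 4 (QUANTUM == 16):
 *
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 *
 * QUANTUM_CEILING(0) == 0, and the idiom only works because QUANTUM is a
 * power of two.
 */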
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)

#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))

#define PAGE_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~PAGE_MASK))

#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)
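/*
 * Example of the page macros, assuming LG_PAGE == 12 (4 KiB pages):
 *
 *   PAGE_ADDR2BASE((void *)0x12345678) == (void *)0x12345000
 *   PAGE_CEILING(1)                    == 4096
 *   PAGE_CEILING(4096)                 == 4096
 *   PAGE_CEILING(4097)                 == 8192
 */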
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & ((~(alignment)) + 1))
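/*
 * The generic alignment macros require `alignment` to be a power of two:
 * ((~(alignment)) + 1) is -alignment in two's complement, i.e. a mask that
 * clears the low lg(alignment) bits.  For example, with alignment == 64:
 *
 *   ALIGNMENT_ADDR2BASE((void *)0x1234567b, 64)   == (void *)0x12345640
 *   ALIGNMENT_ADDR2OFFSET((void *)0x1234567b, 64) == 0x3b
 *   ALIGNMENT_CEILING(100, 64)                    == 128
 */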
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    define alloca _alloca
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
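/*
 * VARIABLE_ARRAY() declares a function-local array whose length is only known
 * at run time: a true VLA under C99 and later, or an alloca()-backed pointer
 * for pre-C99 compilers (with MSVC's _alloca spelling handled above).  Usage
 * sketch (nbins is a hypothetical local variable):
 *
 *   void
 *   example(unsigned nbins)
 *   {
 *       unsigned i;
 *       VARIABLE_ARRAY(size_t, sizes, nbins);
 *
 *       for (i = 0; i < nbins; i++)
 *           sizes[i] = 0;
 *   }
 *
 * Either expansion supports `sizes[i]` indexing, but the alloca() form has
 * function (not block) lifetime and performs no overflow checking on the
 * size multiplication.
 */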
#undef JEMALLOC_H_TYPES

#define JEMALLOC_H_STRUCTS

#define JEMALLOC_ARENA_STRUCTS_A
#undef JEMALLOC_ARENA_STRUCTS_A
#define JEMALLOC_ARENA_STRUCTS_B
#undef JEMALLOC_ARENA_STRUCTS_B

#undef JEMALLOC_H_STRUCTS

#define JEMALLOC_H_EXTERNS

extern unsigned ncpus;

#undef JEMALLOC_H_EXTERNS
#define JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
#endif

#define JEMALLOC_ARENA_INLINE_A
#undef JEMALLOC_ARENA_INLINE_A
#define JEMALLOC_ARENA_INLINE_B
#undef JEMALLOC_ARENA_INLINE_B
/*
 * Compile-time configuration constants.  Each is initialized from the
 * corresponding JEMALLOC_* macro (see the pattern sketched near the top of
 * this header); the initializers are omitted here.
 */
static const bool config_cache_oblivious;
static const bool config_debug;
static const bool config_fill;
static const bool config_ivsalloc;
static const bool config_lazy_lock;
static const char *const config_malloc_conf;
static const bool config_munmap;
static const bool config_prof;
static const bool config_prof_libgcc;
static const bool config_prof_libunwind;
static const bool config_stats;
static const bool config_tcache;
static const bool config_thp;
static const bool config_tls;
static const bool config_utrace;
static const bool config_valgrind;
static const bool config_xmalloc;
static const bool have_dss;
static const bool maps_coalesce;

#define JEMALLOC_CONFIG_MALLOC_CONF

/* External (non-inline) functions. */
void *a0malloc(size_t size);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
void arenas_tdata_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

/* Size class lookup tables. */
extern size_t const index2size_tab[NSIZES];
extern size_t const pind2sz_tab[NPSIZES];
extern uint8_t const size2index_tab[];

/* Inline function prototypes (declared when JEMALLOC_ENABLE_INLINE is unset). */
pszind_t psz2ind(size_t psz);
size_t pind2sz_compute(pszind_t pind);
size_t pind2sz_lookup(pszind_t pind);
size_t pind2sz(pszind_t pind);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
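/*
 * Sketch of how the size-class helpers relate to one another.  The concrete
 * numbers below assume the common x86-64 defaults (LG_QUANTUM == 4,
 * LG_PAGE == 12) and are illustrative only:
 *
 *   s2u_compute(7)  == 8     usable size of the smallest class holding 7 bytes
 *   s2u_compute(17) == 32    small classes near the quantum are 8, 16, 32, 48, ...
 *   size2index(s)            index of that class; index2size() is its inverse,
 *                            so index2size(size2index(s)) == s2u_compute(s)
 *   sa2u(size, alignment)    usable size needed to satisfy both the requested
 *                            size and alignment; always >= s2u_compute(size)
 *
 * psz2ind()/pind2sz() are the analogous conversions for page-size classes.
 * The *_lookup() variants read the size2index_tab/index2size_tab/pind2sz_tab
 * tables, while the *_compute() variants derive the result arithmetically.
 */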