Rizin
unix-like reverse engineering framework and cli tools
arena.h
1 /******************************************************************************/
2 #ifdef JEMALLOC_H_TYPES
3 
4 #define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
5 
6 /* Maximum number of regions in one run. */
7 #define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
8 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
9 
10 /*
11  * Minimum redzone size. Redzones may be larger than this if necessary to
12  * preserve region alignment.
13  */
14 #define REDZONE_MINSIZE 16
15 
16 /*
17  * The minimum ratio of active:dirty pages per arena is computed as:
18  *
19  * (nactive >> lg_dirty_mult) >= ndirty
20  *
21  * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
22  * many active pages as dirty pages.
23  */
24 #define LG_DIRTY_MULT_DEFAULT 3
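A minimal standalone sketch (with made-up page counts, not part of this header) of how the active:dirty ratio above turns into a purge threshold:

#include <stdio.h>
#include <stddef.h>

#define LG_DIRTY_MULT 3 /* same value as LG_DIRTY_MULT_DEFAULT above */

int main(void) {
	size_t nactive = 10000; /* pages backing active runs (made-up number) */
	size_t ndirty = 2000;   /* unused dirty pages (made-up number) */
	size_t threshold = nactive >> LG_DIRTY_MULT; /* ndirty may not exceed this */
	printf("threshold=%zu ndirty=%zu -> %s\n", threshold, ndirty,
	    ndirty > threshold ? "purge needed" : "within ratio");
	return 0;
}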
25 
26 typedef enum {
27  purge_mode_ratio = 0,
28  purge_mode_decay = 1,
29 
30  purge_mode_limit = 2
31 } purge_mode_t;
32 #define PURGE_DEFAULT purge_mode_ratio
33 /* Default decay time in seconds. */
34 #define DECAY_TIME_DEFAULT 10
35 /* Number of event ticks between time checks. */
36 #define DECAY_NTICKS_PER_UPDATE 1000
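A standalone sketch of the ticker idea behind DECAY_NTICKS_PER_UPDATE: only every N allocation events is the comparatively expensive clock read and decay bookkeeping performed. The counter logic here is a simplification of what ticker_ticks() does, not the real implementation.

#include <stdio.h>

#define NTICKS_PER_UPDATE 1000 /* mirrors DECAY_NTICKS_PER_UPDATE above */

int main(void) {
	int ticker = NTICKS_PER_UPDATE;
	long events = 100000, updates = 0;
	for (long i = 0; i < events; i++) {
		if (--ticker <= 0) {
			/* Only here would the clock be read and decay state advanced. */
			ticker = NTICKS_PER_UPDATE;
			updates++;
		}
	}
	printf("%ld allocation events -> %ld decay updates\n", events, updates);
	return 0;
}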
37 
38 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
39 typedef struct arena_avail_links_s arena_avail_links_t;
40 typedef struct arena_run_s arena_run_t;
41 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
42 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
43 typedef struct arena_chunk_s arena_chunk_t;
44 typedef struct arena_bin_info_s arena_bin_info_t;
45 typedef struct arena_decay_s arena_decay_t;
46 typedef struct arena_bin_s arena_bin_t;
47 typedef struct arena_s arena_t;
48 typedef struct arena_tdata_s arena_tdata_t;
49 
50 #endif /* JEMALLOC_H_TYPES */
51 /******************************************************************************/
52 #ifdef JEMALLOC_H_STRUCTS
53 
54 #ifdef JEMALLOC_ARENA_STRUCTS_A
55 struct arena_run_s {
56  /* Index of bin this run is associated with. */
57  szind_t binind;
58 
59  /* Number of free regions in run. */
60  unsigned nfree;
61 
62  /* Per region allocated/deallocated bitmap. */
63  bitmap_t bitmap[BITMAP_GROUPS_MAX];
64 };
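A standalone sketch of how a per-run allocation bitmap like the one above can hand out the lowest free region. It uses a plain machine word instead of the bitmap_t groups used here, and the set-bit-means-free polarity is chosen for readability only.

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void) {
	unsigned bitmap = ~0u; /* one bit per region; a set bit means "free" */
	unsigned nfree = 32;
	for (int i = 0; i < 3; i++) {
		int regind = ffs((int)bitmap) - 1; /* lowest free region */
		bitmap &= ~(1u << regind);         /* mark it allocated */
		nfree--;
		printf("allocated region %d, nfree now %u\n", regind, nfree);
	}
	bitmap |= 1u << 1; /* free region 1 again */
	printf("next free region: %d\n", ffs((int)bitmap) - 1);
	return 0;
}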
65 
66 /* Each element of the chunk map corresponds to one page within the chunk. */
67 struct arena_chunk_map_bits_s {
68  /*
69  * Run address (or size) and various flags are stored together. The bit
70  * layout looks like (assuming 32-bit system):
71  *
72  * ???????? ???????? ???nnnnn nnndumla
73  *
74  * ? : Unallocated: Run address for first/last pages, unset for internal
75  * pages.
76  * Small: Run page offset.
77  * Large: Run page count for first page, unset for trailing pages.
78  * n : binind for small size class, BININD_INVALID for large size class.
79  * d : dirty?
80  * u : unzeroed?
81  * m : decommitted?
82  * l : large?
83  * a : allocated?
84  *
85  * Following are example bit patterns for the three types of runs.
86  *
87  * p : run page offset
88  * s : run size
89  * n : binind for size class; large objects set these to BININD_INVALID
90  * x : don't care
91  * - : 0
92  * + : 1
93  * [DUMLA] : bit set
94  * [dumla] : bit unset
95  *
96  * Unallocated (clean):
97  * ssssssss ssssssss sss+++++ +++dum-a
98  * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
99  * ssssssss ssssssss sss+++++ +++dUm-a
100  *
101  * Unallocated (dirty):
102  * ssssssss ssssssss sss+++++ +++D-m-a
103  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
104  * ssssssss ssssssss sss+++++ +++D-m-a
105  *
106  * Small:
107  * pppppppp pppppppp pppnnnnn nnnd---A
108  * pppppppp pppppppp pppnnnnn nnn----A
109  * pppppppp pppppppp pppnnnnn nnnd---A
110  *
111  * Large:
112  * ssssssss ssssssss sss+++++ +++D--LA
113  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
114  * -------- -------- ---+++++ +++D--LA
115  *
116  * Large (sampled, size <= LARGE_MINCLASS):
117  * ssssssss ssssssss sssnnnnn nnnD--LA
118  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
119  * -------- -------- ---+++++ +++D--LA
120  *
121  * Large (not sampled, size == LARGE_MINCLASS):
122  * ssssssss ssssssss sss+++++ +++D--LA
123  * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
124  * -------- -------- ---+++++ +++D--LA
125  */
126  size_t bits;
127 #define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
128 #define CHUNK_MAP_LARGE ((size_t)0x02U)
129 #define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
130 
131 #define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
132 #define CHUNK_MAP_UNZEROED ((size_t)0x08U)
133 #define CHUNK_MAP_DIRTY ((size_t)0x10U)
134 #define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
135 
136 #define CHUNK_MAP_BININD_SHIFT 5
137 #define BININD_INVALID ((size_t)0xffU)
138 #define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
139 #define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
140 
141 #define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
142 #define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
143 #define CHUNK_MAP_SIZE_MASK \
144  (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
145 };
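A standalone sketch of decoding the flag bits of a mapbits word along the layout documented above. The masks are redefined locally so the snippet compiles on its own, and the mapbits value is made up for illustration.

#include <stdio.h>
#include <stddef.h>

#define MAP_ALLOCATED ((size_t)0x01U)
#define MAP_LARGE     ((size_t)0x02U)
#define MAP_DIRTY     ((size_t)0x10U)
#define BININD_SHIFT  5
#define BININD_MASK   ((size_t)0xffU << BININD_SHIFT)
#define RUNIND_SHIFT  (BININD_SHIFT + 8)

int main(void) {
	/* Made-up mapbits: small run, binind 7, run page offset 3, allocated. */
	size_t mapbits = ((size_t)3 << RUNIND_SHIFT) |
	    ((size_t)7 << BININD_SHIFT) | MAP_ALLOCATED;
	printf("allocated=%d large=%d dirty=%d binind=%zu runind=%zu\n",
	    (mapbits & MAP_ALLOCATED) != 0,
	    (mapbits & MAP_LARGE) != 0,
	    (mapbits & MAP_DIRTY) != 0,
	    (mapbits & BININD_MASK) >> BININD_SHIFT,
	    mapbits >> RUNIND_SHIFT);
	return 0;
}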
146 
147 struct arena_runs_dirty_link_s {
148  qr(arena_runs_dirty_link_t) rd_link;
149 };
150 
151 /*
152  * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
153  * like arena_chunk_map_bits_t. Two separate arrays are stored within each
154  * chunk header in order to improve cache locality.
155  */
156 struct arena_chunk_map_misc_s {
157  /*
158  * Linkage for run heaps. There are two disjoint uses:
159  *
160  * 1) arena_t's runs_avail heaps.
161  * 2) arena_run_t conceptually uses this linkage for in-use non-full
162  * runs, rather than directly embedding linkage.
163  */
164  phn(arena_chunk_map_misc_t) ph_link;
165 
166  union {
167  /* Linkage for list of dirty runs. */
168  arena_runs_dirty_link_t rd;
169 
170  /* Profile counters, used for large object runs. */
171  union {
172  void *prof_tctx_pun;
173  prof_tctx_t *prof_tctx;
174  };
175 
176  /* Small region run metadata. */
177  arena_run_t run;
178  };
179 };
180 typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
181 #endif /* JEMALLOC_ARENA_STRUCTS_A */
182 
183 #ifdef JEMALLOC_ARENA_STRUCTS_B
184 /* Arena chunk header. */
185 struct arena_chunk_s {
186  /*
187  * A pointer to the arena that owns the chunk is stored within the node.
188  * This field as a whole is used by chunks_rtree to support both
189  * ivsalloc() and core-based debugging.
190  */
191  extent_node_t node;
192 
193  /*
194  * True if memory could be backed by transparent huge pages. This is
195  * only directly relevant to Linux, since it is the only supported
196  * platform on which jemalloc interacts with explicit transparent huge
197  * page controls.
198  */
199  bool hugepage;
200 
201  /*
202  * Map of pages within chunk that keeps track of free/large/small. The
203  * first map_bias entries are omitted, since the chunk header does not
204  * need to be tracked in the map. This omission saves a header page
205  * for common chunk sizes (e.g. 4 MiB).
206  */
207  arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
208 };
209 
210 /*
211  * Read-only information associated with each element of arena_t's bins array
212  * is stored separately, partly to reduce memory usage (only one copy, rather
213  * than one per arena), but mainly to avoid false cacheline sharing.
214  *
215  * Each run has the following layout:
216  *
217  * /--------------------\
218  * | pad? |
219  * |--------------------|
220  * | redzone |
221  * reg0_offset | region 0 |
222  * | redzone |
223  * |--------------------| \
224  * | redzone | |
225  * | region 1 | > reg_interval
226  * | redzone | /
227  * |--------------------|
228  * | ... |
229  * | ... |
230  * | ... |
231  * |--------------------|
232  * | redzone |
233  * | region nregs-1 |
234  * | redzone |
235  * |--------------------|
236  * | alignment pad? |
237  * \--------------------/
238  *
239  * reg_interval has at least the same minimum alignment as reg_size; this
240  * preserves the alignment constraint that sa2u() depends on. Alignment pad is
241  * either 0 or redzone_size; it is present only if needed to align reg0_offset.
242  */
243 struct arena_bin_info_s {
244  /* Size of regions in a run for this bin's size class. */
245  size_t reg_size;
246 
247  /* Redzone size. */
248  size_t redzone_size;
249 
250  /* Interval between regions (reg_size + (redzone_size << 1)). */
251  size_t reg_interval;
252 
253  /* Total size of a run for this bin's size class. */
254  size_t run_size;
255 
256  /* Total number of regions in a run for this bin's size class. */
257  uint32_t nregs;
258 
259  /*
260  * Metadata used to manipulate bitmaps for runs associated with this
261  * bin.
262  */
263  bitmap_info_t bitmap_info;
264 
265  /* Offset of first region in a run for this bin's size class. */
266  uint32_t reg0_offset;
267 };
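A standalone sketch of how the fields above locate a region inside a run, following the layout diagram: regions start at reg0_offset and repeat every reg_interval bytes. The field values and run base address are made up; the real values are derived at boot.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

int main(void) {
	size_t reg_size = 48, redzone_size = 16;
	size_t reg_interval = reg_size + (redzone_size << 1); /* 80 */
	uint32_t reg0_offset = 32;              /* pad + leading redzone (made up) */
	uintptr_t rpages = (uintptr_t)0x10000;  /* run base address (made up) */
	unsigned regind = 5;
	uintptr_t reg = rpages + reg0_offset + (uintptr_t)regind * reg_interval;
	printf("region %u starts at 0x%" PRIxPTR "\n", regind, reg);
	return 0;
}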
268 
269 struct arena_decay_s {
270  /*
271  * Approximate time in seconds from the creation of a set of unused
272  * dirty pages until an equivalent set of unused dirty pages is purged
273  * and/or reused.
274  */
275  ssize_t time;
276  /* time / SMOOTHSTEP_NSTEPS. */
277  nstime_t interval;
278  /*
279  * Time at which the current decay interval logically started. We do
280  * not actually advance to a new epoch until sometime after it starts
281  * because of scheduling and computation delays, and it is even possible
282  * to completely skip epochs. In all cases, during epoch advancement we
283  * merge all relevant activity into the most recently recorded epoch.
284  */
285  nstime_t epoch;
286  /* Deadline randomness generator. */
287  uint64_t jitter_state;
288  /*
289  * Deadline for current epoch. This is the sum of interval and per
290  * epoch jitter which is a uniform random variable in [0..interval).
291  * Epochs always advance by precise multiples of interval, but we
292  * randomize the deadline to reduce the likelihood of arenas purging in
293  * lockstep.
294  */
295  nstime_t deadline;
296  /*
297  * Number of dirty pages at beginning of current epoch. During epoch
298  * advancement we use the delta between arena->decay.ndirty and
299  * arena->ndirty to determine how many dirty pages, if any, were
300  * generated.
301  */
302  size_t ndirty;
303  /*
304  * Trailing log of how many unused dirty pages were generated during
305  * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
306  * element is the most recent epoch. Corresponding epoch times are
307  * relative to epoch.
308  */
309  size_t backlog[SMOOTHSTEP_NSTEPS];
310 };
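A simplified standalone sketch (not the real purging code) of the backlog bookkeeping the comments above describe: when the epoch advances by some number of steps, old entries slide out, skipped epochs are recorded as zero, and the last element always holds the most recent epoch. SMOOTHSTEP_NSTEPS is assumed to be 200 here; the real value comes from the smoothstep machinery.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define SMOOTHSTEP_NSTEPS 200 /* assumed value */

static void
backlog_advance(size_t backlog[SMOOTHSTEP_NSTEPS], size_t nadvance, size_t new_dirty)
{
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		/* Every recorded epoch has expired. */
		memset(backlog, 0, (SMOOTHSTEP_NSTEPS - 1) * sizeof(size_t));
	} else {
		/* Drop the oldest epochs, keep the remainder in order. */
		memmove(backlog, &backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&backlog[SMOOTHSTEP_NSTEPS - nadvance], 0,
			    (nadvance - 1) * sizeof(size_t));
		}
	}
	/* Dirty pages generated during the epoch that just closed. */
	backlog[SMOOTHSTEP_NSTEPS - 1] = new_dirty;
}

int main(void) {
	static size_t backlog[SMOOTHSTEP_NSTEPS];
	backlog_advance(backlog, 1, 40); /* one epoch elapsed, 40 new dirty pages */
	backlog_advance(backlog, 3, 10); /* three epochs elapsed, 10 new dirty pages */
	printf("most recent epoch recorded %zu dirty pages\n",
	    backlog[SMOOTHSTEP_NSTEPS - 1]);
	return 0;
}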
311 
312 struct arena_bin_s {
313  /*
314  * All operations on runcur, runs, and stats require that lock be
315  * locked. Run allocation/deallocation are protected by the arena lock,
316  * which may be acquired while holding one or more bin locks, but not
317  * vice versa.
318  */
319  malloc_mutex_t lock;
320 
321  /*
322  * Current run being used to service allocations of this bin's size
323  * class.
324  */
325  arena_run_t *runcur;
326 
327  /*
328  * Heap of non-full runs. This heap is used when looking for an
329  * existing run when runcur is no longer usable. We choose the
330  * non-full run that is lowest in memory; this policy tends to keep
331  * objects packed well, and it can also help reduce the number of
332  * almost-empty chunks.
333  */
334  arena_run_heap_t runs;
335 
336  /* Bin statistics. */
337  malloc_bin_stats_t stats;
338 };
339 
340 struct arena_s {
341  /* This arena's index within the arenas array. */
342  unsigned ind;
343 
344  /*
345  * Number of threads currently assigned to this arena, synchronized via
346  * atomic operations. Each thread has two distinct assignments, one for
347  * application-serving allocation, and the other for internal metadata
348  * allocation. Internal metadata must not be allocated from arenas
349  * created via the arenas.extend mallctl, because the arena.<i>.reset
350  * mallctl indiscriminately discards all allocations for the affected
351  * arena.
352  *
353  * 0: Application allocation.
354  * 1: Internal metadata allocation.
355  */
356  unsigned nthreads[2];
357 
358  /*
359  * There are three classes of arena operations from a locking
360  * perspective:
361  * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
362  * 2) Bin-related operations are protected by bin locks.
363  * 3) Chunk- and run-related operations are protected by this mutex.
364  */
365  malloc_mutex_t lock;
366 
367  arena_stats_t stats;
368  /*
369  * List of tcaches for extant threads associated with this arena.
370  * Stats from these are merged incrementally, and at exit if
371  * opt_stats_print is enabled.
372  */
373  ql_head(tcache_t) tcache_ql;
374 
375  uint64_t prof_accumbytes;
376 
377  /*
378  * PRNG state for cache index randomization of large allocation base
379  * pointers.
380  */
381  size_t offset_state;
382 
383  dss_prec_t dss_prec;
384 
385  /* Extant arena chunks. */
386  ql_head(extent_node_t) achunks;
387 
388  /* Extent serial number generator state. */
389  size_t extent_sn_next;
390 
391  /*
392  * In order to avoid rapid chunk allocation/deallocation when an arena
393  * oscillates right on the cusp of needing a new chunk, cache the most
394  * recently freed chunk. The spare is left in the arena's chunk trees
395  * until it is deleted.
396  *
397  * There is one spare chunk per arena, rather than one spare total, in
398  * order to avoid interactions between multiple threads that could make
399  * a single spare inadequate.
400  */
401  arena_chunk_t *spare;
402 
403  /* Minimum ratio (log base 2) of nactive:ndirty. */
404  ssize_t lg_dirty_mult;
405 
406  /* True if a thread is currently executing arena_purge_to_limit(). */
407  bool purging;
408 
409  /* Number of pages in active runs and huge regions. */
410  size_t nactive;
411 
412  /*
413  * Current count of pages within unused runs that are potentially
414  * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
415  * By tracking this, we can institute a limit on how much dirty unused
416  * memory is mapped for each arena.
417  */
418  size_t ndirty;
419 
420  /*
421  * Unused dirty memory this arena manages. Dirty memory is conceptually
422  * tracked as an arbitrarily interleaved LRU of dirty runs and cached
423  * chunks, but the list linkage is actually semi-duplicated in order to
424  * avoid extra arena_chunk_map_misc_t space overhead.
425  *
426  * LRU-----------------------------------------------------------MRU
427  *
428  *        /-- arena ---\
429  *        |            |
430  *        |            |
431  *        |------------|                             /- chunk -\
432  *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
433  *        |------------|                             |  |node| |
434  *        |            |                             |  |    | |
435  *        |            |    /- run -\    /- run -\   |  |    | |
436  *        |            |    |       |    |       |   |  |    | |
437  *        |            |    |       |    |       |   |  |    | |
438  *        |------------|    |-------|    |-------|   |  |----| |
439  *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
440  *        |------------|    |-------|    |-------|   |  |----| |
441  *        |            |    |       |    |       |   |  |    | |
442  *        |            |    |       |    |       |   |  \----/ |
443  *        |            |    \-------/    \-------/   |         |
444  *        |            |                             |         |
445  *        |            |                             |         |
446  *        \------------/                             \---------/
447  */
448  arena_runs_dirty_link_t runs_dirty;
449  extent_node_t chunks_cache;
450 
451  /* Decay-based purging state. */
452  arena_decay_t decay;
453 
454  /* Extant huge allocations. */
455  ql_head(extent_node_t) huge;
456  /* Synchronizes all huge allocation/update/deallocation. */
457  malloc_mutex_t huge_mtx;
458 
459  /*
460  * Trees of chunks that were previously allocated (trees differ only in
461  * node ordering). These are used when allocating chunks, in an attempt
462  * to re-use address space. Depending on function, different tree
463  * orderings are needed, which is why there are two trees with the same
464  * contents.
465  */
466  extent_tree_t chunks_szsnad_cached;
467  extent_tree_t chunks_ad_cached;
468  extent_tree_t chunks_szsnad_retained;
469  extent_tree_t chunks_ad_retained;
470 
471  malloc_mutex_t chunks_mtx;
472  /* Cache of nodes that were allocated via base_alloc(). */
473  ql_head(extent_node_t) node_cache;
474  malloc_mutex_t node_cache_mtx;
475 
476  /* User-configurable chunk hook functions. */
477  chunk_hooks_t chunk_hooks;
478 
479  /* bins is used to store trees of free regions. */
480  arena_bin_t bins[JM_NBINS];
481 
482  /*
483  * Size-segregated address-ordered heaps of this arena's available runs,
484  * used for first-best-fit run allocation. Runs are quantized, i.e.
485  * they reside in the last heap which corresponds to a size class less
486  * than or equal to the run size.
487  */
488  arena_run_heap_t runs_avail[NPSIZES];
489 };
490 
491 /* Used in conjunction with tsd for fast arena-related context lookup. */
492 struct arena_tdata_s {
493  ticker_t decay_ticker;
494 };
495 #endif /* JEMALLOC_ARENA_STRUCTS_B */
496 
497 #endif /* JEMALLOC_H_STRUCTS */
498 /******************************************************************************/
499 #ifdef JEMALLOC_H_EXTERNS
500 
501 static const size_t large_pad =
502 #ifdef JEMALLOC_CACHE_OBLIVIOUS
503  PAGE
504 #else
505  0
506 #endif
507  ;
508 
509 extern bool opt_thp;
510 extern purge_mode_t opt_purge;
511 extern const char *purge_mode_names[];
512 extern ssize_t opt_lg_dirty_mult;
513 extern ssize_t opt_decay_time;
514 
515 extern arena_bin_info_t arena_bin_info[JM_NBINS];
516 
517 extern size_t map_bias; /* Number of arena chunk header pages. */
518 extern size_t map_misc_offset;
519 extern size_t arena_maxrun; /* Max run size for arenas. */
520 extern size_t large_maxclass; /* Max large size class. */
521 extern unsigned nlclasses; /* Number of large size classes. */
522 extern unsigned nhclasses; /* Number of huge size classes. */
523 
524 #ifdef JEMALLOC_JET
525 typedef size_t (run_quantize_t)(size_t);
526 extern run_quantize_t *run_quantize_floor;
527 extern run_quantize_t *run_quantize_ceil;
528 #endif
529 void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
530  bool cache);
531 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
532  bool cache);
533 extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
534 void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
535 void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
536  size_t alignment, size_t *sn, bool *zero);
537 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
538  size_t usize, size_t sn);
539 void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
540  void *chunk, size_t oldsize, size_t usize);
541 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
542  void *chunk, size_t oldsize, size_t usize, size_t sn);
543 bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
544  void *chunk, size_t oldsize, size_t usize, bool *zero);
545 ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
546 bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
547  ssize_t lg_dirty_mult);
548 ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
549 bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
550 void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
551 void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
552 void arena_reset(tsd_t *tsd, arena_t *arena);
553 void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
554  tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
555 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
556  bool zero);
557 #ifdef JEMALLOC_JET
558 typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
559  uint8_t);
560 extern arena_redzone_corruption_t *arena_redzone_corruption;
561 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
562 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
563 #else
564 void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
565 #endif
566 void arena_quarantine_junk_small(void *ptr, size_t usize);
567 void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
568  bool zero);
569 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
570  szind_t ind, bool zero);
571 void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
572  size_t alignment, bool zero, tcache_t *tcache);
573 void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
574 void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
575  arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
576 void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
577  void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
578 void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
579  void *ptr, size_t pageind);
580 #ifdef JEMALLOC_JET
581 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
582 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
583 #else
584 void arena_dalloc_junk_large(void *ptr, size_t usize);
585 #endif
586 void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
587  arena_chunk_t *chunk, void *ptr);
588 void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
589  void *ptr);
590 #ifdef JEMALLOC_JET
591 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
592 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
593 #endif
594 bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
595  size_t size, size_t extra, bool zero);
596 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
597  size_t size, size_t alignment, bool zero, tcache_t *tcache);
598 dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
599 bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
600 ssize_t arena_lg_dirty_mult_default_get(void);
601 bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
602 ssize_t arena_decay_time_default_get(void);
603 bool arena_decay_time_default_set(ssize_t decay_time);
604 void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
605  unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
606  ssize_t *decay_time, size_t *nactive, size_t *ndirty);
607 void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
608  const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
609  size_t *nactive, size_t *ndirty, arena_stats_t *astats,
610  malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
611  malloc_huge_stats_t *hstats);
612 unsigned arena_nthreads_get(arena_t *arena, bool internal);
613 void arena_nthreads_inc(arena_t *arena, bool internal);
614 void arena_nthreads_dec(arena_t *arena, bool internal);
615 size_t arena_extent_sn_next(arena_t *arena);
616 arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
617 void arena_boot(void);
618 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
619 void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
620 void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
621 void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
622 void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
623 void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
624 
625 #endif /* JEMALLOC_H_EXTERNS */
626 /******************************************************************************/
627 #ifdef JEMALLOC_H_INLINES
628 
629 #ifndef JEMALLOC_ENABLE_INLINE
630 arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
631  size_t pageind);
632 const arena_chunk_map_bits_t *arena_bitselm_get_const(
633  const arena_chunk_t *chunk, size_t pageind);
634 arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
635  size_t pageind);
636 const arena_chunk_map_misc_t *arena_miscelm_get_const(
637  const arena_chunk_t *chunk, size_t pageind);
638 size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
639 void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
640 arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
641 arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
642 size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
643 const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
644  size_t pageind);
645 size_t arena_mapbitsp_read(const size_t *mapbitsp);
646 size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
647 size_t arena_mapbits_size_decode(size_t mapbits);
648 size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
649  size_t pageind);
650 size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
651  size_t pageind);
652 size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
653  size_t pageind);
654 szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
655 size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
656 size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
657 size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
658  size_t pageind);
659 size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
660 size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
661 void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
662 size_t arena_mapbits_size_encode(size_t size);
663 void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
664  size_t size, size_t flags);
665 void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
666  size_t size);
667 void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
668  size_t flags);
669 void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
670  size_t size, size_t flags);
671 void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
672  szind_t binind);
673 void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
674  size_t runind, szind_t binind, size_t flags);
675 void arena_metadata_allocated_add(arena_t *arena, size_t size);
676 void arena_metadata_allocated_sub(arena_t *arena, size_t size);
677 size_t arena_metadata_allocated_get(arena_t *arena);
678 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
679 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
680 bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
681 szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
682 szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
683 size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
684  const void *ptr);
685 prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
686 void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
687  prof_tctx_t *tctx);
688 void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
689  const void *old_ptr, prof_tctx_t *old_tctx);
690 void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
691 void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
692 void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
693  bool zero, tcache_t *tcache, bool slow_path);
694 arena_t *arena_aalloc(const void *ptr);
695 size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
696 void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
697 void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
698  bool slow_path);
699 #endif
700 
701 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
702 # ifdef JEMALLOC_ARENA_INLINE_A
703 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
704 arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
705 {
706  if (unlikely((pageind <= map_bias) || (pageind > chunk_npages)))
707  return (NULL);
708  return (&chunk->map_bits[pageind-map_bias]);
709 }
710 
711 JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
712 arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
713 {
714 
715  return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
716 }
717 
718 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
719 arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
720 {
721  if (unlikely((pageind <= map_bias) || (pageind > chunk_npages)))
722  return (NULL);
723 
724  return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
725  (uintptr_t)map_misc_offset) + pageind-map_bias);
726 }
727 
728 JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
729 arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
730 {
731 
732  return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
733 }
734 
735 JEMALLOC_ALWAYS_INLINE size_t
736 arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
737 {
738  arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
739  size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
740  map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
741 
742  assert(pageind >= map_bias);
743  assert(pageind < chunk_npages);
744 
745  return (pageind);
746 }
747 
748 JEMALLOC_ALWAYS_INLINE void *
749 arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
750 {
751  arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
752  size_t pageind = arena_miscelm_to_pageind(miscelm);
753 
754  return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
755 }
756 
757 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
758 arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
759 {
760  arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
761  *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
762  if (unlikely((arena_miscelm_to_pageind (miscelm) <= map_bias)
763  || arena_miscelm_to_pageind (miscelm) > chunk_npages))
764  return (NULL);
765  return (miscelm);
766 }
767 
768 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
769 arena_run_to_miscelm(arena_run_t *run)
770 {
771  arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
772  *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
773 
774  if (unlikely((arena_miscelm_to_pageind (miscelm) <= map_bias)
775  || arena_miscelm_to_pageind (miscelm) > chunk_npages))
776  return (NULL);
777 
778  return (miscelm);
779 }
780 
781 JEMALLOC_ALWAYS_INLINE size_t *
782 arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
783 {
784 
785  return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
786 }
787 
788 JEMALLOC_ALWAYS_INLINE const size_t *
789 arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
790 {
791 
792  return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
793 }
794 
795 JEMALLOC_ALWAYS_INLINE size_t
796 arena_mapbitsp_read(const size_t *mapbitsp)
797 {
798 
799  return (*mapbitsp);
800 }
801 
802 JEMALLOC_ALWAYS_INLINE size_t
803 arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
804 {
805 
806  return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
807 }
808 
809 JEMALLOC_ALWAYS_INLINE size_t
810 arena_mapbits_size_decode(size_t mapbits)
811 {
812  size_t size;
813 
814 #if CHUNK_MAP_SIZE_SHIFT > 0
815  size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
816 #elif CHUNK_MAP_SIZE_SHIFT == 0
817  size = mapbits & CHUNK_MAP_SIZE_MASK;
818 #else
819  size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
820 #endif
821 
822  return (size);
823 }
824 
825 JEMALLOC_ALWAYS_INLINE size_t
826 arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
827 {
828  size_t mapbits;
829 
830  mapbits = arena_mapbits_get(chunk, pageind);
831  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
832  return (arena_mapbits_size_decode(mapbits));
833 }
834 
835 JEMALLOC_ALWAYS_INLINE size_t
836 arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
837 {
838  size_t mapbits;
839 
840  mapbits = arena_mapbits_get(chunk, pageind);
841  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
842  (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
843  return (arena_mapbits_size_decode(mapbits));
844 }
845 
846 JEMALLOC_ALWAYS_INLINE size_t
847 arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
848 {
849  size_t mapbits;
850 
851  mapbits = arena_mapbits_get(chunk, pageind);
852  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
853  CHUNK_MAP_ALLOCATED);
854  return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
855 }
856 
857 JEMALLOC_ALWAYS_INLINE szind_t
858 arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
859 {
860  size_t mapbits;
861  szind_t binind;
862 
863  mapbits = arena_mapbits_get(chunk, pageind);
864  binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
865  assert(binind < JM_NBINS || binind == BININD_INVALID);
866  return (binind);
867 }
868 
869 JEMALLOC_ALWAYS_INLINE size_t
870 arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
871 {
872  size_t mapbits;
873 
874  mapbits = arena_mapbits_get(chunk, pageind);
875  assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
876  (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
877  return (mapbits & CHUNK_MAP_DIRTY);
878 }
879 
880 JEMALLOC_ALWAYS_INLINE size_t
881 arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
882 {
883  size_t mapbits;
884 
885  mapbits = arena_mapbits_get(chunk, pageind);
886  assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
887  (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
888  return (mapbits & CHUNK_MAP_UNZEROED);
889 }
890 
891 JEMALLOC_ALWAYS_INLINE size_t
892 arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
893 {
894  size_t mapbits;
895 
896  mapbits = arena_mapbits_get(chunk, pageind);
897  assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
898  (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
899  return (mapbits & CHUNK_MAP_DECOMMITTED);
900 }
901 
902 JEMALLOC_ALWAYS_INLINE size_t
903 arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
904 {
905  size_t mapbits;
906 
907  mapbits = arena_mapbits_get(chunk, pageind);
908  return (mapbits & CHUNK_MAP_LARGE);
909 }
910 
911 JEMALLOC_ALWAYS_INLINE size_t
912 arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
913 {
914  size_t mapbits;
915 
916  mapbits = arena_mapbits_get(chunk, pageind);
917  return (mapbits & CHUNK_MAP_ALLOCATED);
918 }
919 
920 JEMALLOC_ALWAYS_INLINE void
921 arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
922 {
923 
924  *mapbitsp = mapbits;
925 }
926 
927 JEMALLOC_ALWAYS_INLINE size_t
928 arena_mapbits_size_encode(size_t size)
929 {
930  size_t mapbits;
931 
932 #if CHUNK_MAP_SIZE_SHIFT > 0
933  mapbits = size << CHUNK_MAP_SIZE_SHIFT;
934 #elif CHUNK_MAP_SIZE_SHIFT == 0
935  mapbits = size;
936 #else
937  mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
938 #endif
939  assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
940  return (mapbits);
941 }
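A standalone sketch showing that the size encode/decode pair above round-trips for page-aligned sizes. The constants assume LG_PAGE == 12 (4 KiB pages), which makes the size shift positive, so only that branch is exercised; the mask is elided because the low bits are already zero.

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define LG_PAGE 12 /* assumed 4 KiB pages */
#define BININD_SHIFT 5
#define RUNIND_SHIFT (BININD_SHIFT + 8)
#define SIZE_SHIFT (RUNIND_SHIFT - LG_PAGE) /* == 1 with these values */

int main(void) {
	size_t size = (size_t)17 << LG_PAGE;    /* a 17-page run, page aligned */
	size_t mapbits = size << SIZE_SHIFT;    /* encode, positive-shift branch */
	size_t decoded = mapbits >> SIZE_SHIFT; /* decode */
	assert(decoded == size);
	printf("size %zu <-> mapbits %#zx\n", size, mapbits);
	return 0;
}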
942 
943 JEMALLOC_ALWAYS_INLINE void
944 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
945  size_t flags)
946 {
947  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
948  assert((size & PAGE_MASK) == 0);
949  assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
950  assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
951  (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
952  arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
953  CHUNK_MAP_BININD_INVALID | flags);
954 }
955 
956 JEMALLOC_ALWAYS_INLINE void
957 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
958  size_t size)
959 {
960  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
961  size_t mapbits = arena_mapbitsp_read(mapbitsp);
962  assert((size & PAGE_MASK) == 0);
963  assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
964  arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
965  (mapbits & ~CHUNK_MAP_SIZE_MASK));
966 }
967 
968 JEMALLOC_ALWAYS_INLINE void
969 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
970 {
971  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
972 
973  assert((flags & CHUNK_MAP_UNZEROED) == flags);
974  arena_mapbitsp_write(mapbitsp, flags);
975 }
976 
977 JEMALLOC_ALWAYS_INLINE void
978 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
979  size_t flags)
980 {
981  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
982 
983  assert((size & PAGE_MASK) == 0);
984  assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
985  assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
986  (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
987  arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
988  CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
989  CHUNK_MAP_ALLOCATED);
990 }
991 
992 JEMALLOC_ALWAYS_INLINE void
993 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
994  szind_t binind)
995 {
996  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
997  size_t mapbits = arena_mapbitsp_read(mapbitsp);
998 
999  assert(binind <= BININD_INVALID);
1000  assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
1001  large_pad);
1002  arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
1003  (binind << CHUNK_MAP_BININD_SHIFT));
1004 }
1005 
1006 JEMALLOC_ALWAYS_INLINE void
1007 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
1008  szind_t binind, size_t flags)
1009 {
1010  size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
1011 
1012  assert(binind < BININD_INVALID);
1013  assert(pageind - runind >= map_bias);
1014  assert((flags & CHUNK_MAP_UNZEROED) == flags);
1015  arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
1016  (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
1017 }
1018 
1019 JEMALLOC_INLINE void
1020 arena_metadata_allocated_add(arena_t *arena, size_t size)
1021 {
1022 
1023  atomic_add_z(&arena->stats.metadata_allocated, size);
1024 }
1025 
1026 JEMALLOC_INLINE void
1027 arena_metadata_allocated_sub(arena_t *arena, size_t size)
1028 {
1029 
1030  atomic_sub_z(&arena->stats.metadata_allocated, size);
1031 }
1032 
1033 JEMALLOC_INLINE size_t
1034 arena_metadata_allocated_get(arena_t *arena)
1035 {
1036 
1037  return (atomic_read_z(&arena->stats.metadata_allocated));
1038 }
1039 
1040 JEMALLOC_INLINE bool
1041 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
1042 {
1043  if (unlikely((!config_prof) || (prof_interval == 0)))
1044  return (false);
1045 
1046  arena->prof_accumbytes += accumbytes;
1047  if (arena->prof_accumbytes >= prof_interval) {
1048  arena->prof_accumbytes -= prof_interval;
1049  return (true);
1050  }
1051  return (false);
1052 }
1053 
1054 JEMALLOC_INLINE bool
1055 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
1056 {
1057 
1058  if (unlikely(!config_prof))
1059  return (false);
1060 
1061  if (likely(prof_interval == 0))
1062  return (false);
1063  return (arena_prof_accum_impl(arena, accumbytes));
1064 }
1065 
1066 JEMALLOC_INLINE bool
1067 arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
1068 {
1069  if (unlikely(!config_prof))
1070  return (false);
1071 
1072  if (likely(prof_interval == 0))
1073  return (false);
1074 
1075  {
1076  bool ret;
1077 
1078  malloc_mutex_lock(tsdn, &arena->lock);
1079  ret = arena_prof_accum_impl(arena, accumbytes);
1080  malloc_mutex_unlock(tsdn, &arena->lock);
1081  return (ret);
1082  }
1083 }
1084 
1085 JEMALLOC_ALWAYS_INLINE szind_t
1086 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
1087 {
1088  szind_t binind;
1089 
1090  binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
1091 
1092  if (config_debug) {
1093  arena_chunk_t *chunk;
1094  arena_t *arena;
1095  size_t pageind;
1096  size_t actual_mapbits;
1097  size_t rpages_ind;
1098  const arena_run_t *run;
1099  arena_bin_t *bin;
1100  szind_t run_binind, actual_binind;
1101  arena_bin_info_t *bin_info;
1102  const arena_chunk_map_misc_t *miscelm;
1103  const void *rpages;
1104 
1105  assert(binind != BININD_INVALID);
1106  assert(binind < JM_NBINS);
1107  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1108  arena = extent_node_arena_get(&chunk->node);
1109  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1110  actual_mapbits = arena_mapbits_get(chunk, pageind);
1111  assert(mapbits == actual_mapbits);
1112  assert(arena_mapbits_large_get(chunk, pageind) == 0);
1113  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1114  rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
1115  pageind);
1116  miscelm = arena_miscelm_get_const(chunk, rpages_ind);
1117  run = &miscelm->run;
1118  run_binind = run->binind;
1119  bin = &arena->bins[run_binind];
1120  actual_binind = (szind_t)(bin - arena->bins);
1121  assert(run_binind == actual_binind);
1122  bin_info = &arena_bin_info[actual_binind];
1123  rpages = arena_miscelm_to_rpages(miscelm);
1124  assert(((uintptr_t)ptr - ((uintptr_t)rpages +
1125  (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
1126  == 0);
1127  }
1128 
1129  return (binind);
1130 }
1131 # endif /* JEMALLOC_ARENA_INLINE_A */
1132 
1133 # ifdef JEMALLOC_ARENA_INLINE_B
1134 JEMALLOC_INLINE szind_t
1135 arena_bin_index(arena_t *arena, arena_bin_t *bin)
1136 {
1137  szind_t binind = (szind_t)(bin - arena->bins);
1138 
1139  assert(binind < JM_NBINS);
1140  return (binind);
1141 }
1142 
1143 JEMALLOC_INLINE size_t
1144 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
1145 {
1146  size_t diff, interval, shift, regind;
1147  arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1148  void *rpages = arena_miscelm_to_rpages(miscelm);
1149 
1150  /*
1151  * Freeing a pointer lower than region zero can cause assertion
1152  * failure.
1153  */
1154  assert((uintptr_t)ptr >= (uintptr_t)rpages +
1155  (uintptr_t)bin_info->reg0_offset);
1156 
1157  /*
1158  * Avoid doing division with a variable divisor if possible. Using
1159  * actual division here can reduce allocator throughput by over 20%!
1160  */
1161  diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
1162  bin_info->reg0_offset);
1163 
1164  /* Rescale (factor powers of 2 out of the numerator and denominator). */
1165  interval = bin_info->reg_interval;
1166  shift = ffs_zu(interval) - 1;
1167  diff >>= shift;
1168  interval >>= shift;
1169 
1170  if (interval == 1) {
1171  /* The divisor was a power of 2. */
1172  regind = diff;
1173  } else {
1174  /*
1175  * To divide by a number D that is not a power of two we
1176  * multiply by (2^21 / D) and then right shift by 21 positions.
1177  *
1178  * X / D
1179  *
1180  * becomes
1181  *
1182  * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
1183  *
1184  * We can omit the first three elements, because we never
1185  * divide by 0, and 1 and 2 are both powers of two, which are
1186  * handled above.
1187  */
1188 #define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
1189 #define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
1190  static const size_t interval_invs[] = {
1191  SIZE_INV(3),
1192  SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
1193  SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
1194  SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
1195  SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
1196  SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
1197  SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
1198  SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
1199  };
1200 
1201  if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
1202  + 2))) {
1203  regind = (diff * interval_invs[interval - 3]) >>
1204  SIZE_INV_SHIFT;
1205  } else
1206  regind = diff / interval;
1207 #undef SIZE_INV
1208 #undef SIZE_INV_SHIFT
1209  }
1210 
1211  assert(diff == regind * interval);
1212  assert(regind < bin_info->nregs);
1213 
1214  return (regind);
1215 }
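A standalone sketch of the multiply-by-precomputed-inverse trick used above: dividing by a small constant divisor without a division instruction. SIZE_INV_SHIFT assumes LG_RUN_MAXREGS == 11; the loop checks the result against true division for every multiple of the divisor in the relevant range.

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define LG_RUN_MAXREGS 11 /* assumed, as with LG_PAGE=12 and LG_TINY_MIN=3 */
#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) ((((size_t)1 << SIZE_INV_SHIFT) / (s)) + 1)

int main(void) {
	size_t interval = 5;      /* divisor that is not a power of two */
	size_t inv = SIZE_INV(5); /* precomputed reciprocal */
	for (size_t diff = 0; diff < 2048; diff += interval) {
		size_t regind = (diff * inv) >> SIZE_INV_SHIFT;
		assert(regind == diff / interval); /* matches true division */
	}
	printf("reciprocal division verified for divisor %zu\n", interval);
	return 0;
}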
1216 
1217 JEMALLOC_INLINE prof_tctx_t *
1218 arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
1219 {
1220  prof_tctx_t *ret;
1221  arena_chunk_t *chunk;
1222 
1223  if (unlikely((!config_prof) || (ptr == NULL)))
1224  return (NULL);
1225 
1226  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1227  if (likely(chunk != ptr)) {
1228  size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1229  size_t mapbits = arena_mapbits_get(chunk, pageind);
1230 
1231  if (unlikely((mapbits & CHUNK_MAP_ALLOCATED) == 0))
1232  return (NULL);
1233 
1234  if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
1235  ret = (prof_tctx_t *)(uintptr_t)1U;
1236  else {
1237  arena_chunk_map_misc_t *elm =
1238  arena_miscelm_get_mutable(chunk, pageind);
1239  ret = atomic_read_p(&elm->prof_tctx_pun);
1240  }
1241  } else
1242  ret = huge_prof_tctx_get(tsdn, ptr);
1243 
1244  return (ret);
1245 }
1246 
1247 JEMALLOC_INLINE void
1248 arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
1249  prof_tctx_t *tctx)
1250 {
1251  arena_chunk_t *chunk;
1252 
1253  cassert(config_prof);
1254  assert(ptr != NULL);
1255 
1256  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1257  if (likely(chunk != ptr)) {
1258  size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1259 
1260  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1261 
1262  if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
1263  (uintptr_t)1U)) {
1264  arena_chunk_map_misc_t *elm;
1265 
1266  assert(arena_mapbits_large_get(chunk, pageind) != 0);
1267 
1268  elm = arena_miscelm_get_mutable(chunk, pageind);
1269  atomic_write_p(&elm->prof_tctx_pun, tctx);
1270  } else {
1271  /*
1272  * tctx must always be initialized for large runs.
1273  * Assert that the surrounding conditional logic is
1274  * equivalent to checking whether ptr refers to a large
1275  * run.
1276  */
1277  assert(arena_mapbits_large_get(chunk, pageind) == 0);
1278  }
1279  } else
1280  huge_prof_tctx_set(tsdn, ptr, tctx);
1281 }
1282 
1283 JEMALLOC_INLINE void
1284 arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
1285  const void *old_ptr, prof_tctx_t *old_tctx)
1286 {
1287 
1288  cassert(config_prof);
1289  assert(ptr != NULL);
1290 
1291  if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
1292  (uintptr_t)old_tctx > (uintptr_t)1U))) {
1293  arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1294  if (likely(chunk != ptr)) {
1295  size_t pageind;
1296  arena_chunk_map_misc_t *elm;
1297 
1298  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1299  LG_PAGE;
1300  assert(arena_mapbits_allocated_get(chunk, pageind) !=
1301  0);
1302  assert(arena_mapbits_large_get(chunk, pageind) != 0);
1303 
1304  elm = arena_miscelm_get_mutable(chunk, pageind);
1305  atomic_write_p(&elm->prof_tctx_pun,
1306  (prof_tctx_t *)(uintptr_t)1U);
1307  } else
1308  huge_prof_tctx_reset(tsdn, ptr);
1309  }
1310 }
1311 
1312 JEMALLOC_ALWAYS_INLINE void
1313 arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
1314 {
1315  tsd_t *tsd;
1316  ticker_t *decay_ticker;
1317 
1318  if (unlikely(tsdn_null(tsdn)))
1319  return;
1320  tsd = tsdn_tsd(tsdn);
1321  decay_ticker = decay_ticker_get(tsd, arena->ind);
1322  if (unlikely(decay_ticker == NULL))
1323  return;
1324  if (unlikely(ticker_ticks(decay_ticker, nticks)))
1325  arena_purge(tsdn, arena, false);
1326 }
1327 
1328 JEMALLOC_ALWAYS_INLINE void
1329 arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
1330 {
1331 
1332  arena_decay_ticks(tsdn, arena, 1);
1333 }
1334 
1335 JEMALLOC_ALWAYS_INLINE void *
1336 arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
1337  tcache_t *tcache, bool slow_path)
1338 {
1339 
1340  if (unlikely(!(!tsdn_null(tsdn) || tcache == NULL) || !(size != 0)))
1341  return (NULL);
1342 
1343  if (likely(tcache != NULL)) {
1344  if (likely(size <= SMALL_MAXCLASS)) {
1345  return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
1346  tcache, size, ind, zero, slow_path));
1347  }
1348  if (likely(size <= tcache_maxclass)) {
1349  return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
1350  tcache, size, ind, zero, slow_path));
1351  }
1352  /* (size > tcache_maxclass) case falls through. */
1353 
1354  if (unlikely(size < tcache_maxclass))
1355  return (NULL);
1356  }
1357 
1358  return (arena_malloc_hard(tsdn, arena, size, ind, zero));
1359 }
1360 
1361 JEMALLOC_ALWAYS_INLINE arena_t *
1362 arena_aalloc(const void *ptr)
1363 {
1364  arena_chunk_t *chunk;
1365 
1366  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1367  if (likely(chunk != ptr))
1368  return (extent_node_arena_get(&chunk->node));
1369  else
1370  return (huge_aalloc(ptr));
1371 }
1372 
1373 /* Return the size of the allocation pointed to by ptr. */
1374 JEMALLOC_ALWAYS_INLINE size_t
1375 arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
1376 {
1377  size_t ret;
1378  arena_chunk_t *chunk;
1379  size_t pageind;
1380  szind_t binind;
1381 
1382  assert(ptr != NULL);
1383 
1384  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1385  if (likely(chunk != ptr)) {
1386  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1387  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1388  binind = arena_mapbits_binind_get(chunk, pageind);
1389  if (unlikely(binind == BININD_INVALID || (config_prof && !demote
1390  && arena_mapbits_large_get(chunk, pageind) != 0))) {
1391  /*
1392  * Large allocation. In the common case (demote), and
1393  * as this is an inline function, most callers will only
1394  * end up looking at binind to determine that ptr is a
1395  * small allocation.
1396  */
1397  assert(config_cache_oblivious || ((uintptr_t)ptr &
1398  PAGE_MASK) == 0);
1399  ret = arena_mapbits_large_size_get(chunk, pageind) -
1400  large_pad;
1401  assert(ret != 0);
1402  assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
1403  chunk_npages);
1404  assert(arena_mapbits_dirty_get(chunk, pageind) ==
1405  arena_mapbits_dirty_get(chunk,
1406  pageind+((ret+large_pad)>>LG_PAGE)-1));
1407  } else {
1408  /*
1409  * Small allocation (possibly promoted to a large
1410  * object).
1411  */
1412  assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
1413  arena_ptr_small_binind_get(ptr,
1414  arena_mapbits_get(chunk, pageind)) == binind);
1415  ret = index2size(binind);
1416  }
1417  } else
1418  ret = huge_salloc(tsdn, ptr);
1419 
1420  return (ret);
1421 }
1422 
1423 JEMALLOC_ALWAYS_INLINE void
1424 arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
1425 {
1426  arena_chunk_t *chunk;
1427  size_t pageind, mapbits;
1428  assert(!tsdn_null(tsdn) || tcache == NULL);
1429  assert(ptr != NULL);
1430 
1431  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1432  if (likely(chunk != ptr)) {
1433  pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1434  mapbits = arena_mapbits_get(chunk, pageind);
1435  assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1436  if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
1437  /* Small allocation. */
1438  if (likely(tcache != NULL)) {
1439  szind_t binind = arena_ptr_small_binind_get(ptr,
1440  mapbits);
1441  tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
1442  binind, slow_path);
1443  } else {
1444  arena_dalloc_small(tsdn,
1445  extent_node_arena_get(&chunk->node), chunk,
1446  ptr, pageind);
1447  }
1448  } else {
1449  size_t size = arena_mapbits_large_size_get(chunk,
1450  pageind);
1451 
1452  assert(config_cache_oblivious || ((uintptr_t)ptr &
1453  PAGE_MASK) == 0);
1454 
1455  if (likely(tcache != NULL) && size - large_pad <=
1456  tcache_maxclass) {
1457  tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1458  size - large_pad, slow_path);
1459  } else {
1460  arena_dalloc_large(tsdn,
1461  extent_node_arena_get(&chunk->node), chunk,
1462  ptr);
1463  }
1464  }
1465  } else
1466  huge_dalloc(tsdn, ptr);
1467 }
1468 
1468 
1469 JEMALLOC_ALWAYS_INLINE void
1470 arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
1471  bool slow_path)
1472 {
1473  arena_chunk_t *chunk;
1474 
1475  assert(!tsdn_null(tsdn) || tcache == NULL);
1476 
1477  chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1478  if (likely(chunk != ptr)) {
1479  if (config_prof && opt_prof) {
1480  size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1481  LG_PAGE;
1482  assert(arena_mapbits_allocated_get(chunk, pageind) !=
1483  0);
1484  if (arena_mapbits_large_get(chunk, pageind) != 0) {
1485  /*
1486  * Make sure to use promoted size, not request
1487  * size.
1488  */
1489  size = arena_mapbits_large_size_get(chunk,
1490  pageind) - large_pad;
1491  }
1492  }
1493  assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
1494 
1495  if (likely(size <= SMALL_MAXCLASS)) {
1496  /* Small allocation. */
1497  if (likely(tcache != NULL)) {
1498  szind_t binind = size2index(size);
1499  tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
1500  binind, slow_path);
1501  } else {
1502  size_t pageind = ((uintptr_t)ptr -
1503  (uintptr_t)chunk) >> LG_PAGE;
1504  arena_dalloc_small(tsdn,
1505  extent_node_arena_get(&chunk->node), chunk,
1506  ptr, pageind);
1507  }
1508  } else {
1509  assert(config_cache_oblivious || ((uintptr_t)ptr &
1510  PAGE_MASK) == 0);
1511 
1512  if (likely(tcache != NULL) && size <= tcache_maxclass) {
1513  tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1514  size, slow_path);
1515  } else {
1516  arena_dalloc_large(tsdn,
1517  extent_node_arena_get(&chunk->node), chunk,
1518  ptr);
1519  }
1520  }
1521  } else
1522  huge_dalloc(tsdn, ptr);
1523 }
1524 # endif /* JEMALLOC_ARENA_INLINE_B */
1525 #endif
1526 
1527 #endif /* JEMALLOC_H_INLINES */
1528 /******************************************************************************/