Rizin
unix-like reverse engineering framework and cli tools
stats.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
};

struct malloc_bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin. Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin. This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;

    /* Total number of runs created for this bin's size class. */
    uint64_t nruns;

    /*
     * Total number of runs reused by extracting them from the runs tree
     * for this bin's size class.
     */
    uint64_t reruns;

    /* Current number of runs in this bin. */
    size_t curruns;
};

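To make the counter semantics above concrete: a tcache hit satisfies a request without touching the bin, so only the request counter moves, and only a miss that falls through to the bin bumps nmalloc. A minimal sketch of that accounting (alloc_small_sketch and its cached_obj parameter are hypothetical, for illustration only):

/* Illustrative only: which counters move on the fast and slow paths. */
static void *
alloc_small_sketch(tcache_bin_stats_t *tstats, malloc_bin_stats_t *bstats,
    void *cached_obj)
{
    tstats->nrequests++;        /* every sized request is counted */
    if (cached_obj != NULL)
        return (cached_obj);    /* tcache hit: nmalloc/ndalloc untouched */
    bstats->nmalloc++;          /* miss: served directly by the bin */
    bstats->curregs++;
    return (NULL);              /* stand-in for a newly carved region */
}

When the tcache later flushes, its per-bin nrequests total is merged into the bin's nrequests, which is why the bin-level counter lags but eventually includes cached activity.
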
struct malloc_large_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena. Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /*
     * Current number of runs of this size class, including runs currently
     * cached by tcache.
     */
    size_t curruns;
};

struct malloc_huge_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /* Current number of (multi-)chunk allocations of this size class. */
    size_t curhchunks;
};

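Because huge allocations never pass through tcache, the current count for a huge size class is simply allocations minus deallocations, and curhchunks tracks exactly that. A hedged consistency check, valid only while no other thread is updating the stats (huge_stats_check is illustrative):

/* Illustrative invariant for a quiescent arena. */
static void
huge_stats_check(const malloc_huge_stats_t *s)
{
    assert(s->curhchunks == (size_t)(s->nmalloc - s->ndalloc));
}
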
struct arena_stats_s {
    /* Number of bytes currently mapped. */
    size_t mapped;

    /*
     * Number of bytes currently retained as a side effect of munmap() being
     * disabled/bypassed. Retained bytes are technically mapped (though
     * always decommitted or purged), but they are excluded from the mapped
     * statistic (above).
     */
    size_t retained;

    /*
     * Total number of purge sweeps, total number of madvise calls made,
     * and total pages purged in order to keep dirty unused memory under
     * control.
     */
    uint64_t npurge;
    uint64_t nmadvise;
    uint64_t purged;

    /*
     * Number of bytes currently mapped purely for metadata purposes, and
     * number of bytes currently allocated for internal metadata.
     */
    size_t metadata_mapped;
    size_t metadata_allocated; /* Protected via atomic_*_z(). */

    /* Per-size-category statistics. */
    size_t allocated_large;
    uint64_t nmalloc_large;
    uint64_t ndalloc_large;
    uint64_t nrequests_large;

    size_t allocated_huge;
    uint64_t nmalloc_huge;
    uint64_t ndalloc_huge;

    /* One element for each large size class. */
    malloc_large_stats_t *lstats;

    /* One element for each huge size class. */
    malloc_huge_stats_t *hstats;
};

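In a stock jemalloc build, the totals gathered in arena_stats_s surface through the public mallctl namespace under stats.arenas.<i>.*; whether Rizin's bundled copy exports them under the same (possibly prefixed) names is an assumption here. A sketch of reading one arena's mapped total:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h> /* assumes a standard jemalloc install */

/* Read one arena's mapped-byte total, refreshing the stats epoch first
 * so cached values are current. */
static void
print_arena_mapped(unsigned arena_ind)
{
    uint64_t epoch = 1;
    size_t esz = sizeof(epoch);
    char name[64];
    size_t mapped, sz = sizeof(mapped);

    mallctl("epoch", &epoch, &esz, &epoch, esz);
    snprintf(name, sizeof(name), "stats.arenas.%u.mapped", arena_ind);
    if (mallctl(name, &mapped, &sz, NULL, 0) == 0)
        printf("arena %u mapped: %zu bytes\n", arena_ind, mapped);
}
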
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_stats_print;

extern size_t stats_cactive;

void stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_H_EXTERNS */
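stats_print() streams its report through the caller-supplied write callback instead of writing to a fixed stream, so the caller decides where the text goes. A minimal sketch of a callback that appends to a FILE (write_to_file is a hypothetical name; jemalloc's public malloc_stats_print() takes the same three arguments):

#include <stdio.h>

/* Invoked repeatedly with successive chunks of the formatted report. */
static void
write_to_file(void *cbopaque, const char *s)
{
    fputs(s, (FILE *)cbopaque);
}

/* Usage sketch: stats_print(write_to_file, fp, NULL);
 * a NULL opts string requests the default, unabridged report. */
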
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

    return (atomic_read_z(&stats_cactive));
}

JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{

    assert(size > 0);
    assert((size & chunksize_mask) == 0);

    atomic_add_z(&stats_cactive, size);
}

JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{

    assert(size > 0);
    assert((size & chunksize_mask) == 0);

    atomic_sub_z(&stats_cactive, size);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
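stats_cactive is maintained in chunk-sized units, which is why both mutators assert chunk alignment before their atomic update, and it is read with an atomic load so observers get a consistent snapshot. A hedged usage sketch (cactive_usage_sketch is a hypothetical name):

#include <stdio.h>

/* Account a chunk-multiple allocation for its lifetime and report the
 * running total; a 'size' that is zero or not a multiple of the chunk
 * size trips the asserts in the mutators. */
static void
cactive_usage_sketch(size_t size)
{
    stats_cactive_add(size);
    printf("active: %zu bytes\n", stats_cactive_get());
    stats_cactive_sub(size);
}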