First Commit

deps/jemalloc/include/jemalloc/internal/arena.h (vendored, new file, 1063 lines)
File diff suppressed because it is too large.
deps/jemalloc/include/jemalloc/internal/arena_externs.h (vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"

extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;

extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;

void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats,
    arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
    bool zero);

typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, extent_t *extent, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
    size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard);
void arena_boot(sc_data_t *sc_data);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h (vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H

static inline unsigned
arena_ind_get(const arena_t *arena) {
    return base_ind_get(arena->base);
}

static inline void
arena_internal_add(arena_t *arena, size_t size) {
    atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline void
arena_internal_sub(arena_t *arena, size_t size) {
    atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}

static inline size_t
arena_internal_get(arena_t *arena) {
    return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}

static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
    cassert(config_prof);

    if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
        return false;
    }

    return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}

static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
    assert(have_percpu_arena);
    arena_t *oldarena = tsd_arena_get(tsd);
    assert(oldarena != NULL);
    unsigned oldind = arena_ind_get(oldarena);

    if (oldind != cpu) {
        unsigned newind = cpu;
        arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
        assert(newarena != NULL);

        /* Set new arena/tcache associations. */
        arena_migrate(tsd, oldind, newind);
        tcache_t *tcache = tcache_get(tsd);
        if (tcache != NULL) {
            tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
                newarena);
        }
    }
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h (vendored, new file, 427 lines)
@@ -0,0 +1,427 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
arena_has_default_hooks(arena_t *arena) {
|
||||
return (extent_hooks_get(arena) == &extent_hooks_default);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
|
||||
if (arena != NULL) {
|
||||
return arena;
|
||||
}
|
||||
|
||||
/*
|
||||
* For huge allocations, use the dedicated huge arena if both are true:
|
||||
* 1) is using auto arena selection (i.e. arena == NULL), and 2) the
|
||||
* thread is not assigned to a manual arena.
|
||||
*/
|
||||
if (unlikely(size >= oversize_threshold)) {
|
||||
arena_t *tsd_arena = tsd_arena_get(tsd);
|
||||
if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
|
||||
return arena_choose_huge(tsd);
|
||||
}
|
||||
}
|
||||
|
||||
return arena_choose(tsd, NULL);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
/* Static check. */
|
||||
if (alloc_ctx == NULL) {
|
||||
const extent_t *extent = iealloc(tsdn, ptr);
|
||||
if (unlikely(!extent_slab_get(extent))) {
|
||||
return large_prof_tctx_get(tsdn, extent);
|
||||
}
|
||||
} else {
|
||||
if (unlikely(!alloc_ctx->slab)) {
|
||||
return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
|
||||
}
|
||||
}
|
||||
return (prof_tctx_t *)(uintptr_t)1U;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
|
||||
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
/* Static check. */
|
||||
if (alloc_ctx == NULL) {
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
if (unlikely(!extent_slab_get(extent))) {
|
||||
large_prof_tctx_set(tsdn, extent, tctx);
|
||||
}
|
||||
} else {
|
||||
if (unlikely(!alloc_ctx->slab)) {
|
||||
large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
assert(!extent_slab_get(extent));
|
||||
|
||||
large_prof_tctx_reset(tsdn, extent);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE nstime_t
|
||||
arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
|
||||
alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
/*
|
||||
* Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
|
||||
* sure we have a sampled allocation.
|
||||
*/
|
||||
assert(!extent_slab_get(extent));
|
||||
return large_prof_alloc_time_get(extent);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
|
||||
nstime_t t) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
assert(!extent_slab_get(extent));
|
||||
large_prof_alloc_time_set(extent, t);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
|
||||
tsd_t *tsd;
|
||||
ticker_t *decay_ticker;
|
||||
|
||||
if (unlikely(tsdn_null(tsdn))) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
|
||||
if (unlikely(decay_ticker == NULL)) {
|
||||
return;
|
||||
}
|
||||
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
|
||||
arena_decay(tsdn, arena, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
|
||||
malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
|
||||
malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
|
||||
|
||||
arena_decay_ticks(tsdn, arena, 1);
|
||||
}
|
||||
|
||||
/* Purge a single extent to retained / unmapped directly. */
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
|
||||
extent_t *extent) {
|
||||
size_t extent_size = extent_size_get(extent);
|
||||
extent_dalloc_wrapper(tsdn, arena,
|
||||
r_extent_hooks, extent);
|
||||
if (config_stats) {
|
||||
/* Update stats accordingly. */
|
||||
arena_stats_lock(tsdn, &arena->stats);
|
||||
arena_stats_add_u64(tsdn, &arena->stats,
|
||||
&arena->decay_dirty.stats->nmadvise, 1);
|
||||
arena_stats_add_u64(tsdn, &arena->stats,
|
||||
&arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
|
||||
arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
|
||||
extent_size);
|
||||
arena_stats_unlock(tsdn, &arena->stats);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
|
||||
tcache_t *tcache, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
|
||||
if (likely(tcache != NULL)) {
|
||||
if (likely(size <= SC_SMALL_MAXCLASS)) {
|
||||
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
|
||||
tcache, size, ind, zero, slow_path);
|
||||
}
|
||||
if (likely(size <= tcache_maxclass)) {
|
||||
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
|
||||
tcache, size, ind, zero, slow_path);
|
||||
}
|
||||
/* (size > tcache_maxclass) case falls through. */
|
||||
assert(size > tcache_maxclass);
|
||||
}
|
||||
|
||||
return arena_malloc_hard(tsdn, arena, size, ind, zero);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
return extent_arena_get(iealloc(tsdn, ptr));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
arena_salloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true);
|
||||
assert(szind != SC_NSIZES);
|
||||
|
||||
return sz_index2size(szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
/*
|
||||
* Return 0 if ptr is not within an extent managed by jemalloc. This
|
||||
* function has two extra costs relative to isalloc():
|
||||
* - The rtree calls cannot claim to be dependent lookups, which induces
|
||||
* rtree lookup load dependencies.
|
||||
* - The lookup may fail, so there is an extra branch to check for
|
||||
* failure.
|
||||
*/
|
||||
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
extent_t *extent;
|
||||
szind_t szind;
|
||||
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, false, &extent, &szind)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (extent == NULL) {
|
||||
return 0;
|
||||
}
|
||||
assert(extent_state_get(extent) == extent_state_active);
|
||||
/* Only slab members should be looked up via interior pointers. */
|
||||
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
|
||||
|
||||
assert(szind != SC_NSIZES);
|
||||
|
||||
return sz_index2size(szind);
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
|
||||
if (config_prof && unlikely(szind < SC_NBINS)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, NULL, true);
|
||||
} else {
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
large_dalloc(tsdn, extent);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
|
||||
true, &szind, &slab);
|
||||
|
||||
if (config_debug) {
|
||||
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
|
||||
rtree_ctx, (uintptr_t)ptr, true);
|
||||
assert(szind == extent_szind_get(extent));
|
||||
assert(szind < SC_NSIZES);
|
||||
assert(slab == extent_slab_get(extent));
|
||||
}
|
||||
|
||||
if (likely(slab)) {
|
||||
/* Small allocation. */
|
||||
arena_dalloc_small(tsdn, ptr);
|
||||
} else {
|
||||
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
|
||||
bool slow_path) {
|
||||
if (szind < nhbins) {
|
||||
if (config_prof && unlikely(szind < SC_NBINS)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
|
||||
} else {
|
||||
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||
slow_path);
|
||||
}
|
||||
} else {
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
large_dalloc(tsdn, extent);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
|
||||
alloc_ctx_t *alloc_ctx, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
assert(ptr != NULL);
|
||||
|
||||
if (unlikely(tcache == NULL)) {
|
||||
arena_dalloc_no_tcache(tsdn, ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
rtree_ctx_t *rtree_ctx;
|
||||
if (alloc_ctx != NULL) {
|
||||
szind = alloc_ctx->szind;
|
||||
slab = alloc_ctx->slab;
|
||||
assert(szind != SC_NSIZES);
|
||||
} else {
|
||||
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
|
||||
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true, &szind, &slab);
|
||||
}
|
||||
|
||||
if (config_debug) {
|
||||
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
|
||||
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
|
||||
rtree_ctx, (uintptr_t)ptr, true);
|
||||
assert(szind == extent_szind_get(extent));
|
||||
assert(szind < SC_NSIZES);
|
||||
assert(slab == extent_slab_get(extent));
|
||||
}
|
||||
|
||||
if (likely(slab)) {
|
||||
/* Small allocation. */
|
||||
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||
slow_path);
|
||||
} else {
|
||||
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
|
||||
assert(ptr != NULL);
|
||||
assert(size <= SC_LARGE_MAXCLASS);
|
||||
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
if (!config_prof || !opt_prof) {
|
||||
/*
|
||||
* There is no risk of being confused by a promoted sampled
|
||||
* object, so base szind and slab on the given size.
|
||||
*/
|
||||
szind = sz_size2index(size);
|
||||
slab = (szind < SC_NBINS);
|
||||
}
|
||||
|
||||
if ((config_prof && opt_prof) || config_debug) {
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
|
||||
&rtree_ctx_fallback);
|
||||
|
||||
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true, &szind, &slab);
|
||||
|
||||
assert(szind == sz_size2index(size));
|
||||
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
|
||||
|
||||
if (config_debug) {
|
||||
extent_t *extent = rtree_extent_read(tsdn,
|
||||
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
|
||||
assert(szind == extent_szind_get(extent));
|
||||
assert(slab == extent_slab_get(extent));
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(slab)) {
|
||||
/* Small allocation. */
|
||||
arena_dalloc_small(tsdn, ptr);
|
||||
} else {
|
||||
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
|
||||
alloc_ctx_t *alloc_ctx, bool slow_path) {
|
||||
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||
assert(ptr != NULL);
|
||||
assert(size <= SC_LARGE_MAXCLASS);
|
||||
|
||||
if (unlikely(tcache == NULL)) {
|
||||
arena_sdalloc_no_tcache(tsdn, ptr, size);
|
||||
return;
|
||||
}
|
||||
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
alloc_ctx_t local_ctx;
|
||||
if (config_prof && opt_prof) {
|
||||
if (alloc_ctx == NULL) {
|
||||
/* Uncommon case and should be a static check. */
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
|
||||
&rtree_ctx_fallback);
|
||||
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true, &local_ctx.szind,
|
||||
&local_ctx.slab);
|
||||
assert(local_ctx.szind == sz_size2index(size));
|
||||
alloc_ctx = &local_ctx;
|
||||
}
|
||||
slab = alloc_ctx->slab;
|
||||
szind = alloc_ctx->szind;
|
||||
} else {
|
||||
/*
|
||||
* There is no risk of being confused by a promoted sampled
|
||||
* object, so base szind and slab on the given size.
|
||||
*/
|
||||
szind = sz_size2index(size);
|
||||
slab = (szind < SC_NBINS);
|
||||
}
|
||||
|
||||
if (config_debug) {
|
||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
|
||||
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true, &szind, &slab);
|
||||
extent_t *extent = rtree_extent_read(tsdn,
|
||||
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
|
||||
assert(szind == extent_szind_get(extent));
|
||||
assert(slab == extent_slab_get(extent));
|
||||
}
|
||||
|
||||
if (likely(slab)) {
|
||||
/* Small allocation. */
|
||||
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||
slow_path);
|
||||
} else {
|
||||
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
|
||||
deps/jemalloc/include/jemalloc/internal/arena_stats.h (vendored, new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
|
||||
/*
|
||||
* In those architectures that support 64-bit atomics, we use atomic updates for
|
||||
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
|
||||
* externally.
|
||||
*/
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
typedef atomic_u64_t arena_stats_u64_t;
|
||||
#else
|
||||
/* Must hold the arena stats mutex while reading atomically. */
|
||||
typedef uint64_t arena_stats_u64_t;
|
||||
#endif
|
||||
|
||||
typedef struct arena_stats_large_s arena_stats_large_t;
|
||||
struct arena_stats_large_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena.
|
||||
*/
|
||||
arena_stats_u64_t nmalloc;
|
||||
arena_stats_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
arena_stats_u64_t nrequests; /* Partially derived. */
|
||||
/*
|
||||
* Number of tcache fills / flushes for large (similarly, periodically
|
||||
* merged). Note that there is no large tcache batch-fill currently
|
||||
* (i.e. only fill 1 at a time); however flush may be batched.
|
||||
*/
|
||||
arena_stats_u64_t nfills; /* Partially derived. */
|
||||
arena_stats_u64_t nflushes; /* Partially derived. */
|
||||
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
};
|
||||
|
||||
typedef struct arena_stats_decay_s arena_stats_decay_t;
|
||||
struct arena_stats_decay_s {
|
||||
/* Total number of purge sweeps. */
|
||||
arena_stats_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
arena_stats_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
arena_stats_u64_t purged;
|
||||
};
|
||||
|
||||
typedef struct arena_stats_extents_s arena_stats_extents_t;
|
||||
struct arena_stats_extents_s {
|
||||
/*
|
||||
* Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
|
||||
* We track both bytes and # of extents: two extents in the same bucket
|
||||
* may have different sizes if adjacent size classes differ by more than
|
||||
* a page, so bytes cannot always be derived from # of extents.
|
||||
*/
|
||||
atomic_zu_t ndirty;
|
||||
atomic_zu_t dirty_bytes;
|
||||
atomic_zu_t nmuzzy;
|
||||
atomic_zu_t muzzy_bytes;
|
||||
atomic_zu_t nretained;
|
||||
atomic_zu_t retained_bytes;
|
||||
};
|
||||
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s arena_stats_t;
|
||||
struct arena_stats_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
#endif
|
||||
|
||||
/* Number of bytes currently mapped, excluding retained memory. */
|
||||
atomic_zu_t mapped; /* Partially derived. */
|
||||
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
atomic_zu_t retained; /* Derived. */
|
||||
|
||||
/* Number of extent_t structs allocated by base, but not being used. */
|
||||
atomic_zu_t extent_avail;
|
||||
|
||||
arena_stats_decay_t decay_dirty;
|
||||
arena_stats_decay_t decay_muzzy;
|
||||
|
||||
atomic_zu_t base; /* Derived. */
|
||||
atomic_zu_t internal;
|
||||
atomic_zu_t resident; /* Derived. */
|
||||
atomic_zu_t metadata_thp;
|
||||
|
||||
atomic_zu_t allocated_large; /* Derived. */
|
||||
arena_stats_u64_t nmalloc_large; /* Derived. */
|
||||
arena_stats_u64_t ndalloc_large; /* Derived. */
|
||||
arena_stats_u64_t nfills_large; /* Derived. */
|
||||
arena_stats_u64_t nflushes_large; /* Derived. */
|
||||
arena_stats_u64_t nrequests_large; /* Derived. */
|
||||
|
||||
/* VM space had to be leaked (undocumented). Normally 0. */
|
||||
atomic_zu_t abandoned_vm;
|
||||
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
atomic_zu_t tcache_bytes; /* Derived. */
|
||||
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
if (config_debug) {
|
||||
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
|
||||
assert(((char *)arena_stats)[i] == 0);
|
||||
}
|
||||
}
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
|
||||
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
/* Memory is zeroed, so there is no need to clear stats. */
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_lock(tsdn, &arena_stats->mtx);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_u64(p, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
return *p;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p, uint64_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
*p += x;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p, uint64_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
*p -= x;
|
||||
assert(*p + x >= *p);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Non-atomically sets *dst += src. *dst needs external synchronization.
|
||||
* This lets us avoid the cost of a fetch_add when its unnecessary (note that
|
||||
* the types here are atomic).
|
||||
*/
|
||||
static inline void
|
||||
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
|
||||
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
|
||||
#else
|
||||
*dst += src;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
atomic_zu_t *p) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
return atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
atomic_zu_t *p, size_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
atomic_zu_t *p, size_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Like the _u64 variant, needs an externally synchronized *dst. */
|
||||
static inline void
|
||||
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
|
||||
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
|
||||
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
szind_t szind, uint64_t nrequests) {
|
||||
arena_stats_lock(tsdn, arena_stats);
|
||||
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
|
||||
arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
|
||||
arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
|
||||
arena_stats_unlock(tsdn, arena_stats);
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
|
||||
arena_stats_lock(tsdn, arena_stats);
|
||||
arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
|
||||
arena_stats_unlock(tsdn, arena_stats);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
|
||||
deps/jemalloc/include/jemalloc/internal/arena_structs_a.h (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H

#include "jemalloc/internal/bitmap.h"

struct arena_slab_data_s {
    /* Per region allocated/deallocated bitmap. */
    bitmap_t bitmap[BITMAP_GROUPS_MAX];
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
deps/jemalloc/include/jemalloc/internal/arena_structs_b.h (vendored, new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
|
||||
|
||||
#include "jemalloc/internal/arena_stats.h"
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/smoothstep.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
struct arena_decay_s {
|
||||
/* Synchronizes all non-atomic fields. */
|
||||
malloc_mutex_t mtx;
|
||||
/*
|
||||
* True if a thread is currently purging the extents associated with
|
||||
* this decay structure.
|
||||
*/
|
||||
bool purging;
|
||||
/*
|
||||
* Approximate time in milliseconds from the creation of a set of unused
|
||||
* dirty pages until an equivalent set of unused dirty pages is purged
|
||||
* and/or reused.
|
||||
*/
|
||||
atomic_zd_t time_ms;
|
||||
/* time / SMOOTHSTEP_NSTEPS. */
|
||||
nstime_t interval;
|
||||
/*
|
||||
* Time at which the current decay interval logically started. We do
|
||||
* not actually advance to a new epoch until sometime after it starts
|
||||
* because of scheduling and computation delays, and it is even possible
|
||||
* to completely skip epochs. In all cases, during epoch advancement we
|
||||
* merge all relevant activity into the most recently recorded epoch.
|
||||
*/
|
||||
nstime_t epoch;
|
||||
/* Deadline randomness generator. */
|
||||
uint64_t jitter_state;
|
||||
/*
|
||||
* Deadline for current epoch. This is the sum of interval and per
|
||||
* epoch jitter which is a uniform random variable in [0..interval).
|
||||
* Epochs always advance by precise multiples of interval, but we
|
||||
* randomize the deadline to reduce the likelihood of arenas purging in
|
||||
* lockstep.
|
||||
*/
|
||||
nstime_t deadline;
|
||||
/*
|
||||
* Number of unpurged pages at beginning of current epoch. During epoch
|
||||
* advancement we use the delta between arena->decay_*.nunpurged and
|
||||
* extents_npages_get(&arena->extents_*) to determine how many dirty
|
||||
* pages, if any, were generated.
|
||||
*/
|
||||
size_t nunpurged;
|
||||
/*
|
||||
* Trailing log of how many unused dirty pages were generated during
|
||||
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
|
||||
* element is the most recent epoch. Corresponding epoch times are
|
||||
* relative to epoch.
|
||||
*/
|
||||
size_t backlog[SMOOTHSTEP_NSTEPS];
|
||||
|
||||
/*
|
||||
* Pointer to associated stats. These stats are embedded directly in
|
||||
* the arena's stats due to how stats structures are shared between the
|
||||
* arena and ctl code.
|
||||
*
|
||||
* Synchronization: Same as associated arena's stats field. */
|
||||
arena_stats_decay_t *stats;
|
||||
/* Peak number of pages in associated extents. Used for debug only. */
|
||||
uint64_t ceil_npages;
|
||||
};
|
||||
|
||||
struct arena_s {
|
||||
/*
|
||||
* Number of threads currently assigned to this arena. Each thread has
|
||||
* two distinct assignments, one for application-serving allocation, and
|
||||
* the other for internal metadata allocation. Internal metadata must
|
||||
* not be allocated from arenas explicitly created via the arenas.create
|
||||
* mallctl, because the arena.<i>.reset mallctl indiscriminately
|
||||
* discards all allocations for the affected arena.
|
||||
*
|
||||
* 0: Application allocation.
|
||||
* 1: Internal metadata allocation.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_u_t nthreads[2];
|
||||
|
||||
/* Next bin shard for binding new threads. Synchronization: atomic. */
|
||||
atomic_u_t binshard_next;
|
||||
|
||||
/*
|
||||
* When percpu_arena is enabled, to amortize the cost of reading /
|
||||
* updating the current CPU id, track the most recent thread accessing
|
||||
* this arena, and only read CPU if there is a mismatch.
|
||||
*/
|
||||
tsdn_t *last_thd;
|
||||
|
||||
/* Synchronization: internal. */
|
||||
arena_stats_t stats;
|
||||
|
||||
/*
|
||||
* Lists of tcaches and cache_bin_array_descriptors for extant threads
|
||||
* associated with this arena. Stats from these are merged
|
||||
* incrementally, and at exit if opt_stats_print is enabled.
|
||||
*
|
||||
* Synchronization: tcache_ql_mtx.
|
||||
*/
|
||||
ql_head(tcache_t) tcache_ql;
|
||||
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
|
||||
malloc_mutex_t tcache_ql_mtx;
|
||||
|
||||
/* Synchronization: internal. */
|
||||
prof_accum_t prof_accum;
|
||||
|
||||
/*
|
||||
* PRNG state for cache index randomization of large allocation base
|
||||
* pointers.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_zu_t offset_state;
|
||||
|
||||
/*
|
||||
* Extent serial number generator state.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_zu_t extent_sn_next;
|
||||
|
||||
/*
|
||||
* Represents a dss_prec_t, but atomically.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_u_t dss_prec;
|
||||
|
||||
/*
|
||||
* Number of pages in active extents.
|
||||
*
|
||||
* Synchronization: atomic.
|
||||
*/
|
||||
atomic_zu_t nactive;
|
||||
|
||||
/*
|
||||
* Extant large allocations.
|
||||
*
|
||||
* Synchronization: large_mtx.
|
||||
*/
|
||||
extent_list_t large;
|
||||
/* Synchronizes all large allocation/update/deallocation. */
|
||||
malloc_mutex_t large_mtx;
|
||||
|
||||
/*
|
||||
* Collections of extents that were previously allocated. These are
|
||||
* used when allocating extents, in an attempt to re-use address space.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
extents_t extents_dirty;
|
||||
extents_t extents_muzzy;
|
||||
extents_t extents_retained;
|
||||
|
||||
/*
|
||||
* Decay-based purging state, responsible for scheduling extent state
|
||||
* transitions.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
arena_decay_t decay_dirty; /* dirty --> muzzy */
|
||||
arena_decay_t decay_muzzy; /* muzzy --> retained */
|
||||
|
||||
/*
|
||||
* Next extent size class in a growing series to use when satisfying a
|
||||
* request via the extent hooks (only if opt_retain). This limits the
|
||||
* number of disjoint virtual memory ranges so that extent merging can
|
||||
* be effective even if multiple arenas' extent allocation requests are
|
||||
* highly interleaved.
|
||||
*
|
||||
* retain_grow_limit is the max allowed size ind to expand (unless the
|
||||
* required size is greater). Default is no limit, and controlled
|
||||
* through mallctl only.
|
||||
*
|
||||
* Synchronization: extent_grow_mtx
|
||||
*/
|
||||
pszind_t extent_grow_next;
|
||||
pszind_t retain_grow_limit;
|
||||
malloc_mutex_t extent_grow_mtx;
|
||||
|
||||
/*
|
||||
* Available extent structures that were allocated via
|
||||
* base_alloc_extent().
|
||||
*
|
||||
* Synchronization: extent_avail_mtx.
|
||||
*/
|
||||
extent_tree_t extent_avail;
|
||||
atomic_zu_t extent_avail_cnt;
|
||||
malloc_mutex_t extent_avail_mtx;
|
||||
|
||||
/*
|
||||
* bins is used to store heaps of free regions.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
bins_t bins[SC_NBINS];
|
||||
|
||||
/*
|
||||
* Base allocator, from which arena metadata are allocated.
|
||||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
base_t *base;
|
||||
/* Used to determine uptime. Read-only after initialization. */
|
||||
nstime_t create_time;
|
||||
};
|
||||
|
||||
/* Used in conjunction with tsd for fast arena-related context lookup. */
|
||||
struct arena_tdata_s {
|
||||
ticker_t decay_ticker;
|
||||
};
|
||||
|
||||
/* Used to pass rtree lookup context down the path. */
|
||||
struct alloc_ctx_s {
|
||||
szind_t szind;
|
||||
bool slab;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
|
||||
deps/jemalloc/include/jemalloc/internal/arena_types.h (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H

#include "jemalloc/internal/sc.h"

/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)

/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000

typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;

typedef enum {
    percpu_arena_mode_names_base = 0, /* Used for options processing. */

    /*
     * *_uninit are used only during bootstrapping, and must correspond
     * to initialized variant plus percpu_arena_mode_enabled_base.
     */
    percpu_arena_uninit = 0,
    per_phycpu_arena_uninit = 1,

    /* All non-disabled modes must come after percpu_arena_disabled. */
    percpu_arena_disabled = 2,

    percpu_arena_mode_names_limit = 3, /* Used for options processing. */
    percpu_arena_mode_enabled_base = 3,

    percpu_arena = 3,
    per_phycpu_arena = 4 /* Hyper threads share arena. */
} percpu_arena_mode_t;

#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled

/*
 * When allocation_size >= oversize_threshold, use the dedicated huge arena
 * (unless an arena index has been explicitly specified).  0 disables the feature.
 */
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)

#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
deps/jemalloc/include/jemalloc/internal/assert.h (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#define assert(e) do { \
    if (unlikely(config_debug && !(e))) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
            __FILE__, __LINE__, #e); \
        abort(); \
    } \
} while (0)
#endif

#ifndef not_reached
#define not_reached() do { \
    if (config_debug) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Unreachable code reached\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
    unreachable(); \
} while (0)
#endif

#ifndef not_implemented
#define not_implemented() do { \
    if (config_debug) { \
        malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif

#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
    if (unlikely(config_debug && !(e))) { \
        not_implemented(); \
    } \
} while (0)
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#ifndef cassert
#define cassert(c) do { \
    if (unlikely(!(c))) { \
        not_reached(); \
    } \
} while (0)
#endif
deps/jemalloc/include/jemalloc/internal/atomic.h (vendored, new file, 86 lines)
@@ -0,0 +1,86 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_H

#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE

#define JEMALLOC_U8_ATOMICS
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
#  include "jemalloc/internal/atomic_gcc_atomic.h"
#  if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
#    undef JEMALLOC_U8_ATOMICS
#  endif
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
#  include "jemalloc/internal/atomic_gcc_sync.h"
#  if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
#    undef JEMALLOC_U8_ATOMICS
#  endif
#elif defined(_MSC_VER)
#  include "jemalloc/internal/atomic_msvc.h"
#elif defined(JEMALLOC_C11_ATOMICS)
#  include "jemalloc/internal/atomic_c11.h"
#else
#  error "Don't have atomics implemented on this platform."
#endif

/*
 * This header gives more or less a backport of C11 atomics. The user can write
 * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
 * counterparts of the C11 atomic functions for type, as so:
 *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
 * and then write things like:
 *   int *some_ptr;
 *   atomic_pi_t atomic_ptr_to_int;
 *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
 *   int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
 *   assert(some_ptr == prev_value);
 * and expect things to work in the obvious way.
 *
 * Also included (with naming differences to avoid conflicts with the standard
 * library):
 *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
 *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
 */

/*
 * Pure convenience, so that we don't have to type "atomic_memory_order_"
 * quite so often.
 */
#define ATOMIC_RELAXED atomic_memory_order_relaxed
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
#define ATOMIC_RELEASE atomic_memory_order_release
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst

/*
 * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
 * fact.
 */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
#  define JEMALLOC_ATOMIC_U64
#endif

JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)

/*
 * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
 * platform that actually needs to know the size, MSVC.
 */
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)

JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)

JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)

JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)

JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)

JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)

#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif

#undef ATOMIC_INLINE

#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
deps/jemalloc/include/jemalloc/internal/atomic_c11.h (vendored, new file, 97 lines)
@@ -0,0 +1,97 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
#define JEMALLOC_INTERNAL_ATOMIC_C11_H

#include <stdatomic.h>

#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)

#define atomic_memory_order_t memory_order
#define atomic_memory_order_relaxed memory_order_relaxed
#define atomic_memory_order_acquire memory_order_acquire
#define atomic_memory_order_release memory_order_release
#define atomic_memory_order_acq_rel memory_order_acq_rel
#define atomic_memory_order_seq_cst memory_order_seq_cst

#define atomic_fence atomic_thread_fence

#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
    /* unused */ lg_size) \
typedef _Atomic(type) atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
    atomic_memory_order_t mo) { \
    /* \
     * A strict interpretation of the C standard prevents \
     * atomic_load from taking a const argument, but it's \
     * convenient for our purposes. This cast is a workaround. \
     */ \
    atomic_##short_type##_t* a_nonconst = \
        (atomic_##short_type##_t*)a; \
    return atomic_load_explicit(a_nonconst, mo); \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    atomic_store_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
    atomic_memory_order_t mo) { \
    return atomic_exchange_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo, \
    atomic_memory_order_t failure_mo) { \
    return atomic_compare_exchange_weak_explicit(a, expected, \
        desired, success_mo, failure_mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo, \
    atomic_memory_order_t failure_mo) { \
    return atomic_compare_exchange_strong_explicit(a, expected, \
        desired, success_mo, failure_mo); \
}

/*
 * Integral types have some special operations available that non-integral ones
 * lack.
 */
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
    /* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    return atomic_fetch_add_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    return atomic_fetch_sub_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    return atomic_fetch_and_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    return atomic_fetch_or_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
    return atomic_fetch_xor_explicit(a, val, mo); \
}

#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h (vendored, new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
ATOMIC_INLINE int
|
||||
atomic_enum_to_builtin(atomic_memory_order_t mo) {
|
||||
switch (mo) {
|
||||
case atomic_memory_order_relaxed:
|
||||
return __ATOMIC_RELAXED;
|
||||
case atomic_memory_order_acquire:
|
||||
return __ATOMIC_ACQUIRE;
|
||||
case atomic_memory_order_release:
|
||||
return __ATOMIC_RELEASE;
|
||||
case atomic_memory_order_acq_rel:
|
||||
return __ATOMIC_ACQ_REL;
|
||||
case atomic_memory_order_seq_cst:
|
||||
return __ATOMIC_SEQ_CST;
|
||||
}
|
||||
/* Can't happen; the switch is exhaustive. */
|
||||
not_reached();
|
||||
}
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
__atomic_thread_fence(atomic_enum_to_builtin(mo));
|
||||
}
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
typedef struct {	\
	type repr;	\
} atomic_##short_type##_t;	\
	\
ATOMIC_INLINE type	\
atomic_load_##short_type(const atomic_##short_type##_t *a,	\
    atomic_memory_order_t mo) {	\
	type result;	\
	__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo));	\
	return result;	\
}	\
	\
ATOMIC_INLINE void	\
atomic_store_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	type result;	\
	__atomic_exchange(&a->repr, &val, &result,	\
	    atomic_enum_to_builtin(mo));	\
	return result;	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    UNUSED type *expected, type desired,	\
    atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
	    true, atomic_enum_to_builtin(success_mo),	\
	    atomic_enum_to_builtin(failure_mo));	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
    UNUSED type *expected, type desired,	\
    atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
	    false,	\
	    atomic_enum_to_builtin(success_mo),	\
	    atomic_enum_to_builtin(failure_mo));	\
}


#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
	\
ATOMIC_INLINE type	\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_add(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_sub(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_and(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_or(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_xor(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}

#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
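As a usage illustration (a sketch only; the concrete instantiations live elsewhere in jemalloc's atomic headers, the type/shorthand pair below is invented for the example, and it assumes the ATOMIC_INIT macro defined earlier in this header), expanding the generator yields a typed wrapper API:

JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32_demo, 2)

static atomic_u32_demo_t demo_counter = ATOMIC_INIT(0);

static uint32_t
demo_bump(void) {
	/* Like __atomic_fetch_add(): returns the pre-increment value. */
	return atomic_fetch_add_u32_demo(&demo_counter, 1,
	    atomic_memory_order_relaxed);
}
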
195
deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
/* Easy cases first: no barrier, and full barrier. */
|
||||
if (mo == atomic_memory_order_relaxed) {
|
||||
asm volatile("" ::: "memory");
|
||||
return;
|
||||
}
|
||||
if (mo == atomic_memory_order_seq_cst) {
|
||||
asm volatile("" ::: "memory");
|
||||
__sync_synchronize();
|
||||
asm volatile("" ::: "memory");
|
||||
return;
|
||||
}
|
||||
asm volatile("" ::: "memory");
|
||||
# if defined(__i386__) || defined(__x86_64__)
|
||||
/* This is implicit on x86. */
|
||||
# elif defined(__ppc64__)
|
||||
asm volatile("lwsync");
|
||||
# elif defined(__ppc__)
|
||||
asm volatile("sync");
|
||||
# elif defined(__sparc__) && defined(__arch64__)
|
||||
if (mo == atomic_memory_order_acquire) {
|
||||
asm volatile("membar #LoadLoad | #LoadStore");
|
||||
} else if (mo == atomic_memory_order_release) {
|
||||
asm volatile("membar #LoadStore | #StoreStore");
|
||||
} else {
|
||||
asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
|
||||
}
|
||||
# else
|
||||
__sync_synchronize();
|
||||
# endif
|
||||
asm volatile("" ::: "memory");
|
||||
}
|
||||
|
||||
/*
|
||||
* A correct implementation of seq_cst loads and stores on weakly ordered
|
||||
* architectures could do either of the following:
|
||||
* 1. store() is weak-fence -> store -> strong fence, load() is load ->
|
||||
* strong-fence.
|
||||
* 2. store() is strong-fence -> store, load() is strong-fence -> load ->
|
||||
* weak-fence.
|
||||
* The tricky thing is, load() and store() above can be the load or store
|
||||
* portions of a gcc __sync builtin, so we have to follow GCC's lead, which
|
||||
* means going with strategy 2.
|
||||
* On strongly ordered architectures, the natural strategy is to stick a strong
|
||||
* fence after seq_cst stores, and have naked loads. So we want the strong
|
||||
* fences in different places on different architectures.
|
||||
* atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
|
||||
* accomplish this.
|
||||
*/
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_pre_sc_load_fence() {
|
||||
# if defined(__i386__) || defined(__x86_64__) || \
|
||||
(defined(__sparc__) && defined(__arch64__))
|
||||
atomic_fence(atomic_memory_order_relaxed);
|
||||
# else
|
||||
atomic_fence(atomic_memory_order_seq_cst);
|
||||
# endif
|
||||
}
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_post_sc_store_fence() {
|
||||
# if defined(__i386__) || defined(__x86_64__) || \
|
||||
(defined(__sparc__) && defined(__arch64__))
|
||||
atomic_fence(atomic_memory_order_seq_cst);
|
||||
# else
|
||||
atomic_fence(atomic_memory_order_relaxed);
|
||||
# endif
|
||||
|
||||
}
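The comment above can be made concrete with a standalone sketch (not jemalloc code) of strategy 2, the one the __sync path has to follow: the strong fence goes before both the seq_cst store and the seq_cst load, and only a compiler barrier follows the load:

static int g_val;

static void
seq_cst_store_strategy2(int v) {
	__sync_synchronize();		/* strong fence */
	g_val = v;			/* plain store */
}

static int
seq_cst_load_strategy2(void) {
	__sync_synchronize();		/* strong fence */
	int v = g_val;			/* plain load */
	asm volatile("" ::: "memory");	/* weak (compiler-only) fence */
	return v;
}
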
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
typedef struct { \
|
||||
type volatile repr; \
|
||||
} atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_pre_sc_load_fence(); \
|
||||
} \
|
||||
type result = a->repr; \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_acquire); \
|
||||
} \
|
||||
return result; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_release); \
|
||||
} \
|
||||
a->repr = val; \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_post_sc_store_fence(); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
/* \
|
||||
* Because of FreeBSD, we care about gcc 4.2, which doesn't have\
|
||||
* an atomic exchange builtin. We fake it with a CAS loop. \
|
||||
*/ \
|
||||
while (true) { \
|
||||
type old = a->repr; \
|
||||
if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
|
||||
return old; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||
desired); \
|
||||
if (prev == *expected) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = prev; \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, \
|
||||
atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||
desired); \
|
||||
if (prev == *expected) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = prev; \
|
||||
return false; \
|
||||
} \
|
||||
}
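The CAS-loop idiom used for atomic_exchange_* above, written out as a standalone sketch with the __sync builtins (not jemalloc code):

static unsigned long
exchange_via_cas(volatile unsigned long *p, unsigned long new_val) {
	for (;;) {
		unsigned long old = *p;
		/* Succeeds only if *p is still 'old'; otherwise re-read and retry. */
		if (__sync_bool_compare_and_swap(p, old, new_val)) {
			return old;
		}
	}
}
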
|
||||
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
|
||||
/* unused */ lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_add(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_sub(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_and(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_or(&a->repr, val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return __sync_fetch_and_xor(&a->repr, val); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
|
||||
158
deps/jemalloc/include/jemalloc/internal/atomic_msvc.h
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
|
||||
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
|
||||
|
||||
#define ATOMIC_INIT(...) {__VA_ARGS__}
|
||||
|
||||
typedef enum {
|
||||
atomic_memory_order_relaxed,
|
||||
atomic_memory_order_acquire,
|
||||
atomic_memory_order_release,
|
||||
atomic_memory_order_acq_rel,
|
||||
atomic_memory_order_seq_cst
|
||||
} atomic_memory_order_t;
|
||||
|
||||
typedef char atomic_repr_0_t;
|
||||
typedef short atomic_repr_1_t;
|
||||
typedef long atomic_repr_2_t;
|
||||
typedef __int64 atomic_repr_3_t;
|
||||
|
||||
ATOMIC_INLINE void
|
||||
atomic_fence(atomic_memory_order_t mo) {
|
||||
_ReadWriteBarrier();
|
||||
# if defined(_M_ARM) || defined(_M_ARM64)
|
||||
/* ARM needs a barrier for everything but relaxed. */
|
||||
if (mo != atomic_memory_order_relaxed) {
|
||||
MemoryBarrier();
|
||||
}
|
||||
# elif defined(_M_IX86) || defined (_M_X64)
|
||||
/* x86 needs a barrier only for seq_cst. */
|
||||
if (mo == atomic_memory_order_seq_cst) {
|
||||
MemoryBarrier();
|
||||
}
|
||||
# else
|
||||
# error "Don't know how to create atomics for this platform for MSVC."
|
||||
# endif
|
||||
_ReadWriteBarrier();
|
||||
}
|
||||
|
||||
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
|
||||
|
||||
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
|
||||
#define ATOMIC_RAW_CONCAT(a, b) a ## b
|
||||
|
||||
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
|
||||
base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
|
||||
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
|
||||
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
|
||||
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_0 8
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_2
|
||||
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
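The suffix table above drives token pasting: ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3) becomes _InterlockedExchange64, while lg_size 2 appends the empty suffix and selects the plain 32-bit intrinsic. A portable standalone demo of the same trick, using hypothetical DEMO_* names that are not part of this header:

#include <stdio.h>

#define DEMO_RAW_CONCAT(a, b) a##b
#define DEMO_CONCAT(a, b) DEMO_RAW_CONCAT(a, b)
#define DEMO_SUFFIX_2		/* empty: the 32-bit name has no suffix */
#define DEMO_SUFFIX_3 64
#define DEMO_NAME(base, lg_size) \
	DEMO_CONCAT(base, DEMO_CONCAT(DEMO_SUFFIX_, lg_size))

static void xchg_demo(void)   { puts("32-bit path"); }
static void xchg_demo64(void) { puts("64-bit path"); }

int
main(void) {
	DEMO_NAME(xchg_demo, 2)();	/* expands to xchg_demo()   */
	DEMO_NAME(xchg_demo, 3)();	/* expands to xchg_demo64() */
	return 0;
}
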
|
||||
|
||||
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
|
||||
typedef struct { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
|
||||
} atomic_##short_type##_t; \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_load_##short_type(const atomic_##short_type##_t *a, \
|
||||
atomic_memory_order_t mo) { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_acquire); \
|
||||
} \
|
||||
return (type) ret; \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE void \
|
||||
atomic_store_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
if (mo != atomic_memory_order_relaxed) { \
|
||||
atomic_fence(atomic_memory_order_release); \
|
||||
} \
|
||||
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
|
||||
if (mo == atomic_memory_order_seq_cst) { \
|
||||
atomic_fence(atomic_memory_order_seq_cst); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
|
||||
atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
|
||||
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) e = \
|
||||
(ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) d = \
|
||||
(ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
|
||||
ATOMIC_INTERLOCKED_REPR(lg_size) old = \
|
||||
ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
|
||||
lg_size)(&a->repr, d, e); \
|
||||
if (old == e) { \
|
||||
return true; \
|
||||
} else { \
|
||||
*expected = (type)old; \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE bool \
|
||||
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||
atomic_memory_order_t failure_mo) { \
|
||||
/* We implement the weak version with strong semantics. */ \
|
||||
return atomic_compare_exchange_weak_##short_type(a, expected, \
|
||||
desired, success_mo, failure_mo); \
|
||||
}
|
||||
|
||||
|
||||
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
|
||||
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
|
||||
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
\
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
/* \
|
||||
* MSVC warns on negation of unsigned operands, but for us it \
|
||||
* gives exactly the right semantics (MAX_TYPE + 1 - operand). \
|
||||
*/ \
|
||||
__pragma(warning(push)) \
|
||||
__pragma(warning(disable: 4146)) \
|
||||
return atomic_fetch_add_##short_type(a, -val, mo); \
|
||||
__pragma(warning(pop)) \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
} \
|
||||
ATOMIC_INLINE type \
|
||||
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
|
||||
type val, atomic_memory_order_t mo) { \
|
||||
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
|
||||
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
|
||||
32
deps/jemalloc/include/jemalloc/internal/background_thread_externs.h
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H

extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;

bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
|
||||
62
deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
|
||||
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
background_thread_enabled(void) {
|
||||
return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
|
||||
malloc_mutex_assert_owner(tsdn, &background_thread_lock);
|
||||
atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
|
||||
arena_background_thread_info_get(arena_t *arena) {
|
||||
unsigned arena_ind = arena_ind_get(arena);
|
||||
return &background_thread_info[arena_ind % max_background_threads];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
|
||||
background_thread_info_get(size_t ind) {
|
||||
return &background_thread_info[ind % max_background_threads];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint64_t
|
||||
background_thread_wakeup_time_get(background_thread_info_t *info) {
|
||||
uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
|
||||
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
|
||||
(next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
|
||||
return next_wakeup;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
|
||||
uint64_t wakeup_time) {
|
||||
malloc_mutex_assert_owner(tsdn, &info->mtx);
|
||||
atomic_store_b(&info->indefinite_sleep,
|
||||
wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
|
||||
nstime_init(&info->next_wakeup, wakeup_time);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
background_thread_indefinite_sleep(background_thread_info_t *info) {
|
||||
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
|
||||
bool is_background_thread) {
|
||||
if (!background_thread_enabled() || is_background_thread) {
|
||||
return;
|
||||
}
|
||||
background_thread_info_t *info =
|
||||
arena_background_thread_info_get(arena);
|
||||
if (background_thread_indefinite_sleep(info)) {
|
||||
background_thread_interval_check(tsdn, arena,
|
||||
&arena->decay_dirty, 0);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
|
||||
54
deps/jemalloc/include/jemalloc/internal/background_thread_structs.h
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
|
||||
|
||||
/* This file really combines "structs" and "types", but only transitionally. */
|
||||
|
||||
#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
|
||||
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
|
||||
#endif
|
||||
|
||||
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
|
||||
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
|
||||
#define DEFAULT_NUM_BACKGROUND_THREAD 4
|
||||
|
||||
typedef enum {
|
||||
background_thread_stopped,
|
||||
background_thread_started,
|
||||
/* Thread waits on the global lock when paused (for arena_reset). */
|
||||
background_thread_paused,
|
||||
} background_thread_state_t;
|
||||
|
||||
struct background_thread_info_s {
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
/* Background thread is pthread specific. */
|
||||
pthread_t thread;
|
||||
pthread_cond_t cond;
|
||||
#endif
|
||||
malloc_mutex_t mtx;
|
||||
background_thread_state_t state;
|
||||
/* When true, it means no wakeup scheduled. */
|
||||
atomic_b_t indefinite_sleep;
|
||||
/* Next scheduled wakeup time (absolute time in ns). */
|
||||
nstime_t next_wakeup;
|
||||
/*
|
||||
* Since the last background thread run, newly added number of pages
|
||||
* that need to be purged by the next wakeup. This is adjusted on
|
||||
* epoch advance, and is used to determine whether we should signal the
|
||||
* background thread to wake up earlier.
|
||||
*/
|
||||
size_t npages_to_purge_new;
|
||||
/* Stats: total number of runs since started. */
|
||||
uint64_t tot_n_runs;
|
||||
/* Stats: total sleep time since started. */
|
||||
nstime_t tot_sleep_time;
|
||||
};
|
||||
typedef struct background_thread_info_s background_thread_info_t;
|
||||
|
||||
struct background_thread_stats_s {
|
||||
size_t num_threads;
|
||||
uint64_t num_runs;
|
||||
nstime_t run_interval;
|
||||
};
|
||||
typedef struct background_thread_stats_s background_thread_stats_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
|
||||
26
deps/jemalloc/include/jemalloc/internal/base.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	*base_alloc(size_t size);
void	*base_calloc(size_t number, size_t size);
extent_node_t *base_node_alloc(void);
void	base_node_dealloc(extent_node_t *node);
bool	base_boot(void);
void	base_prefork(void);
void	base_postfork_parent(void);
void	base_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
|
||||
22
deps/jemalloc/include/jemalloc/internal/base_externs.h
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
|
||||
13
deps/jemalloc/include/jemalloc/internal/base_inlines.h
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

static inline unsigned
base_ind_get(const base_t *base) {
	return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
|
||||
59
deps/jemalloc/include/jemalloc/internal/base_structs.h
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
/* Embedded at the beginning of every block of base-managed virtual memory. */
|
||||
struct base_block_s {
|
||||
/* Total size of block's virtual memory mapping. */
|
||||
size_t size;
|
||||
|
||||
/* Next block in list of base's blocks. */
|
||||
base_block_t *next;
|
||||
|
||||
/* Tracks unused trailing space. */
|
||||
extent_t extent;
|
||||
};
|
||||
|
||||
struct base_s {
|
||||
/* Associated arena's index within the arenas array. */
|
||||
unsigned ind;
|
||||
|
||||
/*
|
||||
* User-configurable extent hook functions. Points to an
|
||||
* extent_hooks_t.
|
||||
*/
|
||||
atomic_p_t extent_hooks;
|
||||
|
||||
/* Protects base_alloc() and base_stats_get() operations. */
|
||||
malloc_mutex_t mtx;
|
||||
|
||||
/* Using THP when true (metadata_thp auto mode). */
|
||||
bool auto_thp_switched;
|
||||
/*
|
||||
* Most recent size class in the series of increasingly large base
|
||||
* extents. Logarithmic spacing between subsequent allocations ensures
|
||||
* that the total number of distinct mappings remains small.
|
||||
*/
|
||||
pszind_t pind_last;
|
||||
|
||||
/* Serial number generation state. */
|
||||
size_t extent_sn_next;
|
||||
|
||||
/* Chain of all blocks associated with base. */
|
||||
base_block_t *blocks;
|
||||
|
||||
/* Heap of extents that track unused trailing space within blocks. */
|
||||
extent_heap_t avail[SC_NSIZES];
|
||||
|
||||
/* Stats, only maintained if config_stats. */
|
||||
size_t allocated;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
/* Number of THP regions touched. */
|
||||
size_t n_thp;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
|
||||
33
deps/jemalloc/include/jemalloc/internal/base_types.h
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */

#define BASE_AUTO_THP_THRESHOLD    2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
	metadata_thp_disabled   = 0,
	/*
	 * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
	 * + low usage arena (i.e. THP becomes a significant percentage), the
	 * "auto" option only starts using THP after a base allocator used up
	 * the first THP region.  Starting from the second hugepage (in a single
	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
	 * right away.
	 */
	metadata_thp_auto       = 1,
	metadata_thp_always     = 2,
	metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
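A sketch of how these thresholds are intended to be read (illustrative only; the helper name and parameters are hypothetical, and the real decision is made inside jemalloc's base allocator, not here):

static bool
base_should_use_thp_sketch(size_t n_blocks, bool is_a0,
    metadata_thp_mode_t mode) {
	if (mode == metadata_thp_always) {
		return true;
	}
	if (mode != metadata_thp_auto) {
		return false;
	}
	/* "auto": only after enough base blocks have been allocated. */
	return n_blocks >= (is_a0 ?
	    BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD);
}
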
|
||||
123
deps/jemalloc/include/jemalloc/internal/bin.h
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BIN_H
|
||||
#define JEMALLOC_INTERNAL_BIN_H
|
||||
|
||||
#include "jemalloc/internal/bin_stats.h"
|
||||
#include "jemalloc/internal/bin_types.h"
|
||||
#include "jemalloc/internal/extent_types.h"
|
||||
#include "jemalloc/internal/extent_structs.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
/*
|
||||
* A bin contains a set of extents that are currently being used for slab
|
||||
* allocations.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of arena_t's bins array
|
||||
* is stored separately, partly to reduce memory usage (only one copy, rather
|
||||
* than one per arena), but mainly to avoid false cacheline sharing.
|
||||
*
|
||||
* Each slab has the following layout:
|
||||
*
|
||||
* /--------------------\
|
||||
* | region 0 |
|
||||
* |--------------------|
|
||||
* | region 1 |
|
||||
* |--------------------|
|
||||
* | ... |
|
||||
* | ... |
|
||||
* | ... |
|
||||
* |--------------------|
|
||||
* | region nregs-1 |
|
||||
* \--------------------/
|
||||
*/
|
||||
typedef struct bin_info_s bin_info_t;
|
||||
struct bin_info_s {
|
||||
/* Size of regions in a slab for this bin's size class. */
|
||||
size_t reg_size;
|
||||
|
||||
/* Total size of a slab for this bin's size class. */
|
||||
size_t slab_size;
|
||||
|
||||
/* Total number of regions in a slab for this bin's size class. */
|
||||
uint32_t nregs;
|
||||
|
||||
/* Number of sharded bins in each arena for this size class. */
|
||||
uint32_t n_shards;
|
||||
|
||||
/*
|
||||
* Metadata used to manipulate bitmaps for slabs associated with this
|
||||
* bin.
|
||||
*/
|
||||
bitmap_info_t bitmap_info;
|
||||
};
|
||||
|
||||
extern bin_info_t bin_infos[SC_NBINS];
|
||||
|
||||
typedef struct bin_s bin_t;
|
||||
struct bin_s {
|
||||
/* All operations on bin_t fields require lock ownership. */
|
||||
malloc_mutex_t lock;
|
||||
|
||||
/*
|
||||
* Current slab being used to service allocations of this bin's size
|
||||
* class. slabcur is independent of slabs_{nonfull,full}; whenever
|
||||
* slabcur is reassigned, the previous slab must be deallocated or
|
||||
* inserted into slabs_{nonfull,full}.
|
||||
*/
|
||||
extent_t *slabcur;
|
||||
|
||||
/*
|
||||
* Heap of non-full slabs. This heap is used to assure that new
|
||||
* allocations come from the non-full slab that is oldest/lowest in
|
||||
* memory.
|
||||
*/
|
||||
extent_heap_t slabs_nonfull;
|
||||
|
||||
/* List used to track full slabs. */
|
||||
extent_list_t slabs_full;
|
||||
|
||||
/* Bin statistics. */
|
||||
bin_stats_t stats;
|
||||
};
|
||||
|
||||
/* A set of sharded bins of the same size class. */
|
||||
typedef struct bins_s bins_t;
|
||||
struct bins_s {
|
||||
/* Sharded bins. Dynamically sized. */
|
||||
bin_t *bin_shards;
|
||||
};
|
||||
|
||||
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
|
||||
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
|
||||
size_t end_size, size_t nshards);
|
||||
void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
|
||||
|
||||
/* Initializes a bin to empty. Returns true on error. */
|
||||
bool bin_init(bin_t *bin);
|
||||
|
||||
/* Forking. */
|
||||
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
|
||||
|
||||
/* Stats. */
|
||||
static inline void
|
||||
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
|
||||
malloc_mutex_lock(tsdn, &bin->lock);
|
||||
malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
|
||||
dst_bin_stats->nmalloc += bin->stats.nmalloc;
|
||||
dst_bin_stats->ndalloc += bin->stats.ndalloc;
|
||||
dst_bin_stats->nrequests += bin->stats.nrequests;
|
||||
dst_bin_stats->curregs += bin->stats.curregs;
|
||||
dst_bin_stats->nfills += bin->stats.nfills;
|
||||
dst_bin_stats->nflushes += bin->stats.nflushes;
|
||||
dst_bin_stats->nslabs += bin->stats.nslabs;
|
||||
dst_bin_stats->reslabs += bin->stats.reslabs;
|
||||
dst_bin_stats->curslabs += bin->stats.curslabs;
|
||||
dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
|
||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_H */
|
||||
54
deps/jemalloc/include/jemalloc/internal/bin_stats.h
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
#define JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
|
||||
typedef struct bin_stats_s bin_stats_t;
|
||||
struct bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
* many times, resulting many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to the size of this
|
||||
* bin. This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
/* Current size of nonfull slabs heap in this bin. */
|
||||
size_t nonfull_slabs;
|
||||
|
||||
mutex_prof_data_t mutex_data;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
|
||||
17
deps/jemalloc/include/jemalloc/internal/bin_types.h
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
#define JEMALLOC_INTERNAL_BIN_TYPES_H

#include "jemalloc/internal/sc.h"

#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1

/* Used in TSD static initializer only. Real init in arena_bind(). */
#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}

typedef struct tsd_binshards_s tsd_binshards_t;
struct tsd_binshards_s {
	uint8_t binshard[SC_NBINS];
};

#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
|
||||
239
deps/jemalloc/include/jemalloc/internal/bit_util.h
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
|
||||
#define JEMALLOC_INTERNAL_BIT_UTIL_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
#define BIT_UTIL_INLINE static inline
|
||||
|
||||
/* Sanity check. */
|
||||
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|
||||
|| !defined(JEMALLOC_INTERNAL_FFS)
|
||||
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
|
||||
#endif
|
||||
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_llu(unsigned long long bitmap) {
|
||||
return JEMALLOC_INTERNAL_FFSLL(bitmap);
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_lu(unsigned long bitmap) {
|
||||
return JEMALLOC_INTERNAL_FFSL(bitmap);
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_u(unsigned bitmap) {
|
||||
return JEMALLOC_INTERNAL_FFS(bitmap);
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
|
||||
BIT_UTIL_INLINE unsigned
|
||||
popcount_lu(unsigned long bitmap) {
|
||||
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Clears first unset bit in bitmap, and returns
|
||||
* place of bit. bitmap *must not* be 0.
|
||||
*/
|
||||
|
||||
BIT_UTIL_INLINE size_t
|
||||
cfs_lu(unsigned long* bitmap) {
|
||||
size_t bit = ffs_lu(*bitmap) - 1;
|
||||
*bitmap ^= ZU(1) << bit;
|
||||
return bit;
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_zu(size_t bitmap) {
|
||||
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
|
||||
return ffs_u(bitmap);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
|
||||
return ffs_lu(bitmap);
|
||||
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
|
||||
return ffs_llu(bitmap);
|
||||
#else
|
||||
#error No implementation for size_t ffs()
|
||||
#endif
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_u64(uint64_t bitmap) {
|
||||
#if LG_SIZEOF_LONG == 3
|
||||
return ffs_lu(bitmap);
|
||||
#elif LG_SIZEOF_LONG_LONG == 3
|
||||
return ffs_llu(bitmap);
|
||||
#else
|
||||
#error No implementation for 64-bit ffs()
|
||||
#endif
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
ffs_u32(uint32_t bitmap) {
|
||||
#if LG_SIZEOF_INT == 2
|
||||
return ffs_u(bitmap);
|
||||
#else
|
||||
#error No implementation for 32-bit ffs()
|
||||
#endif
|
||||
return ffs_u(bitmap);
|
||||
}
|
||||
|
||||
BIT_UTIL_INLINE uint64_t
|
||||
pow2_ceil_u64(uint64_t x) {
|
||||
#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
|
||||
if(unlikely(x <= 1)) {
|
||||
return x;
|
||||
}
|
||||
size_t msb_on_index;
|
||||
#if (defined(__amd64__) || defined(__x86_64__))
|
||||
asm ("bsrq %1, %0"
|
||||
: "=r"(msb_on_index) // Outputs.
|
||||
: "r"(x-1) // Inputs.
|
||||
);
|
||||
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
|
||||
msb_on_index = (63 ^ __builtin_clzll(x - 1));
|
||||
#endif
|
||||
assert(msb_on_index < 63);
|
||||
return 1ULL << (msb_on_index + 1);
|
||||
#else
|
||||
x--;
|
||||
x |= x >> 1;
|
||||
x |= x >> 2;
|
||||
x |= x >> 4;
|
||||
x |= x >> 8;
|
||||
x |= x >> 16;
|
||||
x |= x >> 32;
|
||||
x++;
|
||||
return x;
|
||||
#endif
|
||||
}
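A standalone check (not part of the header) of the contract both branches above implement: the result is the smallest power of two that is >= x, with x <= 1 returned unchanged. The portable fallback is reproduced so the example compiles on its own:

#include <assert.h>
#include <stdint.h>

static uint64_t
pow2_ceil_u64_portable(uint64_t x) {
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16; x |= x >> 32;
	return x + 1;
}

static void
pow2_ceil_demo(void) {
	assert(pow2_ceil_u64_portable(1) == 1);
	assert(pow2_ceil_u64_portable(3) == 4);
	assert(pow2_ceil_u64_portable(4096) == 4096);
	assert(pow2_ceil_u64_portable(4097) == 8192);
}
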
|
||||
|
||||
BIT_UTIL_INLINE uint32_t
|
||||
pow2_ceil_u32(uint32_t x) {
|
||||
#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
|
||||
if(unlikely(x <= 1)) {
|
||||
return x;
|
||||
}
|
||||
size_t msb_on_index;
|
||||
#if (defined(__i386__))
|
||||
asm ("bsr %1, %0"
|
||||
: "=r"(msb_on_index) // Outputs.
|
||||
: "r"(x-1) // Inputs.
|
||||
);
|
||||
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
|
||||
msb_on_index = (31 ^ __builtin_clz(x - 1));
|
||||
#endif
|
||||
assert(msb_on_index < 31);
|
||||
return 1U << (msb_on_index + 1);
|
||||
#else
|
||||
x--;
|
||||
x |= x >> 1;
|
||||
x |= x >> 2;
|
||||
x |= x >> 4;
|
||||
x |= x >> 8;
|
||||
x |= x >> 16;
|
||||
x++;
|
||||
return x;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Compute the smallest power of 2 that is >= x. */
|
||||
BIT_UTIL_INLINE size_t
|
||||
pow2_ceil_zu(size_t x) {
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
return pow2_ceil_u64(x);
|
||||
#else
|
||||
return pow2_ceil_u32(x);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
|
||||
BIT_UTIL_INLINE unsigned
|
||||
lg_floor(size_t x) {
|
||||
size_t ret;
|
||||
assert(x != 0);
|
||||
|
||||
asm ("bsr %1, %0"
|
||||
: "=r"(ret) // Outputs.
|
||||
: "r"(x) // Inputs.
|
||||
);
|
||||
assert(ret < UINT_MAX);
|
||||
return (unsigned)ret;
|
||||
}
|
||||
#elif (defined(_MSC_VER))
|
||||
BIT_UTIL_INLINE unsigned
|
||||
lg_floor(size_t x) {
|
||||
unsigned long ret;
|
||||
|
||||
assert(x != 0);
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
_BitScanReverse64(&ret, x);
|
||||
#elif (LG_SIZEOF_PTR == 2)
|
||||
_BitScanReverse(&ret, x);
|
||||
#else
|
||||
# error "Unsupported type size for lg_floor()"
|
||||
#endif
|
||||
assert(ret < UINT_MAX);
|
||||
return (unsigned)ret;
|
||||
}
|
||||
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
|
||||
BIT_UTIL_INLINE unsigned
|
||||
lg_floor(size_t x) {
|
||||
assert(x != 0);
|
||||
|
||||
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
|
||||
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
|
||||
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
|
||||
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
|
||||
#else
|
||||
# error "Unsupported type size for lg_floor()"
|
||||
#endif
|
||||
}
|
||||
#else
|
||||
BIT_UTIL_INLINE unsigned
|
||||
lg_floor(size_t x) {
|
||||
assert(x != 0);
|
||||
|
||||
x |= (x >> 1);
|
||||
x |= (x >> 2);
|
||||
x |= (x >> 4);
|
||||
x |= (x >> 8);
|
||||
x |= (x >> 16);
|
||||
#if (LG_SIZEOF_PTR == 3)
|
||||
x |= (x >> 32);
|
||||
#endif
|
||||
if (x == SIZE_T_MAX) {
|
||||
return (8 << LG_SIZEOF_PTR) - 1;
|
||||
}
|
||||
x++;
|
||||
return ffs_zu(x) - 2;
|
||||
}
|
||||
#endif
|
||||
|
||||
BIT_UTIL_INLINE unsigned
|
||||
lg_ceil(size_t x) {
|
||||
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
|
||||
}
|
||||
|
||||
#undef BIT_UTIL_INLINE
|
||||
|
||||
/* A compile-time version of lg_floor and lg_ceil. */
|
||||
#define LG_FLOOR_1(x) 0
|
||||
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
|
||||
#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
|
||||
#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
|
||||
#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
|
||||
#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
|
||||
#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
|
||||
#if LG_SIZEOF_PTR == 2
|
||||
# define LG_FLOOR(x) LG_FLOOR_32((x))
|
||||
#else
|
||||
# define LG_FLOOR(x) LG_FLOOR_64((x))
|
||||
#endif
|
||||
|
||||
#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
|
||||
369
deps/jemalloc/include/jemalloc/internal/bitmap.h
vendored
Normal file
@@ -0,0 +1,369 @@
|
||||
#ifndef JEMALLOC_INTERNAL_BITMAP_H
|
||||
#define JEMALLOC_INTERNAL_BITMAP_H
|
||||
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
typedef unsigned long bitmap_t;
|
||||
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
|
||||
|
||||
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
|
||||
#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
|
||||
/* Maximum bitmap bit count is determined by maximum regions per slab. */
|
||||
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
|
||||
#else
|
||||
/* Maximum bitmap bit count is determined by number of extent size classes. */
|
||||
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
|
||||
#endif
|
||||
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
|
||||
|
||||
/* Number of bits per group. */
|
||||
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
|
||||
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
|
||||
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
|
||||
|
||||
/*
|
||||
* Do some analysis on how big the bitmap is before we use a tree. For a brute
|
||||
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
|
||||
* use a tree instead.
|
||||
*/
|
||||
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
|
||||
# define BITMAP_USE_TREE
|
||||
#endif
|
||||
|
||||
/* Number of groups required to store a given number of bits. */
|
||||
#define BITMAP_BITS2GROUPS(nbits) \
|
||||
(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
|
||||
|
||||
/*
|
||||
* Number of groups required at a particular level for a given number of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_L0(nbits) \
|
||||
BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_L1(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
|
||||
#define BITMAP_GROUPS_L2(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
|
||||
#define BITMAP_GROUPS_L3(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS((nbits)))))
|
||||
#define BITMAP_GROUPS_L4(nbits) \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
|
||||
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
|
||||
|
||||
/*
|
||||
* Assuming the number of levels, number of groups required for a given number
|
||||
* of bits.
|
||||
*/
|
||||
#define BITMAP_GROUPS_1_LEVEL(nbits) \
|
||||
BITMAP_GROUPS_L0(nbits)
|
||||
#define BITMAP_GROUPS_2_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
|
||||
#define BITMAP_GROUPS_3_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
|
||||
#define BITMAP_GROUPS_4_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
|
||||
#define BITMAP_GROUPS_5_LEVEL(nbits) \
|
||||
(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
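A worked example, assuming 64-bit groups (LG_BITMAP_GROUP_NBITS == 6): a 512-bit bitmap needs ceil(512/64) = 8 level-0 groups and ceil(8/64) = 1 level-1 group, so BITMAP_GROUPS_2_LEVEL(512) == 9 and the whole bitmap is summarized by a single root group. The helper below (not part of the header) mirrors BITMAP_BITS2GROUPS for that group size:

static size_t
bits2groups_64(size_t nbits) {
	/* ceil(nbits / 64), as BITMAP_BITS2GROUPS() computes for 64-bit groups. */
	return (nbits + 63) >> 6;
}
/* bits2groups_64(512) == 8, bits2groups_64(8) == 1  =>  8 + 1 == 9 groups. */
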
|
||||
|
||||
/*
|
||||
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
|
||||
*/
|
||||
#ifdef BITMAP_USE_TREE
|
||||
|
||||
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
|
||||
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
|
||||
# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
|
||||
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
|
||||
#else
|
||||
# error "Unsupported bitmap size"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Maximum number of levels possible. This could be statically computed based
|
||||
* on LG_BITMAP_MAXBITS:
|
||||
*
|
||||
* #define BITMAP_MAX_LEVELS \
|
||||
* (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
|
||||
* + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
|
||||
*
|
||||
* However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
|
||||
* instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
|
||||
* various cascading macros. The only additional cost this incurs is some
|
||||
* unused trailing entries in bitmap_info_t structures; the bitmaps themselves
|
||||
* are not impacted.
|
||||
*/
|
||||
#define BITMAP_MAX_LEVELS 5
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* nlevels. */ \
|
||||
(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
|
||||
(BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
|
||||
(BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
|
||||
(BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
|
||||
/* levels. */ \
|
||||
{ \
|
||||
{0}, \
|
||||
{BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
|
||||
BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
|
||||
BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
|
||||
{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
|
||||
BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
|
||||
+ BITMAP_GROUPS_L0(nbits)} \
|
||||
} \
|
||||
}
|
||||
|
||||
#else /* BITMAP_USE_TREE */
|
||||
|
||||
#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
|
||||
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
|
||||
|
||||
#define BITMAP_INFO_INITIALIZER(nbits) { \
|
||||
/* nbits. */ \
|
||||
nbits, \
|
||||
/* ngroups. */ \
|
||||
BITMAP_BITS2GROUPS(nbits) \
|
||||
}
|
||||
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
|
||||
typedef struct bitmap_level_s {
|
||||
/* Offset of this level's groups within the array of groups. */
|
||||
size_t group_offset;
|
||||
} bitmap_level_t;
|
||||
|
||||
typedef struct bitmap_info_s {
|
||||
/* Logical number of bits in bitmap (stored at bottom level). */
|
||||
size_t nbits;
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Number of levels necessary for nbits. */
|
||||
unsigned nlevels;
|
||||
|
||||
/*
|
||||
* Only the first (nlevels+1) elements are used, and levels are ordered
|
||||
* bottom to top (e.g. the bottom level is stored in levels[0]).
|
||||
*/
|
||||
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
|
||||
#else /* BITMAP_USE_TREE */
|
||||
/* Number of groups necessary for nbits. */
|
||||
size_t ngroups;
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
} bitmap_info_t;
|
||||
|
||||
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
|
||||
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
|
||||
size_t bitmap_size(const bitmap_info_t *binfo);
|
||||
|
||||
static inline bool
|
||||
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
|
||||
bitmap_t rg = bitmap[rgoff];
|
||||
/* The bitmap is full iff the root group is 0. */
|
||||
return (rg == 0);
|
||||
#else
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < binfo->ngroups; i++) {
|
||||
if (bitmap[i] != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool
|
||||
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
g = bitmap[goff];
|
||||
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
}
|
||||
|
||||
static inline void
|
||||
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (g == 0) {
|
||||
unsigned i;
|
||||
for (i = 1; i < binfo->nlevels; i++) {
|
||||
bit = goff;
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (g != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ffu: find first unset >= bit. */
|
||||
static inline size_t
|
||||
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
|
||||
assert(min_bit < binfo->nbits);
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
size_t bit = 0;
|
||||
for (unsigned level = binfo->nlevels; level--;) {
|
||||
size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
|
||||
1));
|
||||
bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
|
||||
>> lg_bits_per_group)];
|
||||
unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
|
||||
bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
|
||||
assert(group_nmask <= BITMAP_GROUP_NBITS);
|
||||
bitmap_t group_mask = ~((1LU << group_nmask) - 1);
|
||||
bitmap_t group_masked = group & group_mask;
|
||||
if (group_masked == 0LU) {
|
||||
if (group == 0LU) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
/*
|
||||
* min_bit was preceded by one or more unset bits in
|
||||
* this group, but there are no other unset bits in this
|
||||
* group. Try again starting at the first bit of the
|
||||
* next sibling. This will recurse at most once per
|
||||
* non-root level.
|
||||
*/
|
||||
size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
|
||||
assert(sib_base > min_bit);
|
||||
assert(sib_base > bit);
|
||||
if (sib_base >= binfo->nbits) {
|
||||
return binfo->nbits;
|
||||
}
|
||||
return bitmap_ffu(bitmap, binfo, sib_base);
|
||||
}
|
||||
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
|
||||
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
|
||||
}
|
||||
assert(bit >= min_bit);
|
||||
assert(bit < binfo->nbits);
|
||||
return bit;
|
||||
#else
|
||||
size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
|
||||
bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
|
||||
- 1);
|
||||
size_t bit;
|
||||
do {
|
||||
bit = ffs_lu(g);
|
||||
if (bit != 0) {
|
||||
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
}
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
} while (i < binfo->ngroups);
|
||||
return binfo->nbits;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* sfu: set first unset. */
|
||||
static inline size_t
|
||||
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
|
||||
size_t bit;
|
||||
bitmap_t g;
|
||||
unsigned i;
|
||||
|
||||
assert(!bitmap_full(bitmap, binfo));
|
||||
|
||||
#ifdef BITMAP_USE_TREE
|
||||
i = binfo->nlevels - 1;
|
||||
g = bitmap[binfo->levels[i].group_offset];
|
||||
bit = ffs_lu(g) - 1;
|
||||
while (i > 0) {
|
||||
i--;
|
||||
g = bitmap[binfo->levels[i].group_offset + bit];
|
||||
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
|
||||
}
|
||||
#else
|
||||
i = 0;
|
||||
g = bitmap[0];
|
||||
while ((bit = ffs_lu(g)) == 0) {
|
||||
i++;
|
||||
g = bitmap[i];
|
||||
}
|
||||
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
|
||||
#endif
|
||||
bitmap_set(bitmap, binfo, bit);
|
||||
return bit;
|
||||
}
|
||||
|
||||
static inline void
|
||||
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
|
||||
size_t goff;
|
||||
bitmap_t *gp;
|
||||
bitmap_t g;
|
||||
UNUSED bool propagate;
|
||||
|
||||
assert(bit < binfo->nbits);
|
||||
assert(bitmap_get(bitmap, binfo, bit));
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
assert(!bitmap_get(bitmap, binfo, bit));
|
||||
#ifdef BITMAP_USE_TREE
|
||||
/* Propagate group state transitions up the tree. */
|
||||
if (propagate) {
|
||||
unsigned i;
|
||||
for (i = 1; i < binfo->nlevels; i++) {
|
||||
bit = goff;
|
||||
goff = bit >> LG_BITMAP_GROUP_NBITS;
|
||||
gp = &bitmap[binfo->levels[i].group_offset + goff];
|
||||
g = *gp;
|
||||
propagate = (g == 0);
|
||||
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
|
||||
== 0);
|
||||
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
|
||||
*gp = g;
|
||||
if (!propagate) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* BITMAP_USE_TREE */
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BITMAP_H */
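A usage sketch (not jemalloc code) tying the pieces above together: initialize a bitmap over 100 bits, claim the first free bit with bitmap_sfu(), and release it with bitmap_unset(). It assumes the enclosing jemalloc build environment for assert() and the bitmap types:

static void
bitmap_demo(void) {
	bitmap_info_t binfo;
	bitmap_t groups[BITMAP_GROUPS_MAX];

	bitmap_info_init(&binfo, 100);
	bitmap_init(groups, &binfo, /* fill */ false);	/* all bits start free */

	size_t bit = bitmap_sfu(groups, &binfo);	/* claims bit 0 */
	assert(bit == 0);
	bitmap_unset(groups, &binfo, bit);		/* bit 0 is free again */
}
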
|
||||
131
deps/jemalloc/include/jemalloc/internal/cache_bin.h
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
#define JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
|
||||
/*
|
||||
* The cache_bins are the mechanism that the tcache and the arena use to
|
||||
* communicate. The tcache fills from and flushes to the arena by passing a
|
||||
* cache_bin_t to fill/flush. When the arena needs to pull stats from the
|
||||
* tcaches associated with it, it does so by iterating over its
|
||||
* cache_bin_array_descriptor_t objects and reading out per-bin stats it
|
||||
* contains. This makes it so that the arena need not know about the existence
|
||||
* of the tcache at all.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* The count of the number of cached allocations in a bin. We make this signed
|
||||
* so that negative numbers can encode "invalid" states (e.g. a low water mark
|
||||
* of -1 for a cache that has been depleted).
|
||||
*/
|
||||
typedef int32_t cache_bin_sz_t;
|
||||
|
||||
typedef struct cache_bin_stats_s cache_bin_stats_t;
|
||||
struct cache_bin_stats_s {
|
||||
/*
|
||||
* Number of allocation requests that corresponded to the size of this
|
||||
* bin.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
};
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of tcache_t's tbins array
|
||||
* is stored separately, mainly to reduce memory usage.
|
||||
*/
|
||||
typedef struct cache_bin_info_s cache_bin_info_t;
|
||||
struct cache_bin_info_s {
|
||||
/* Upper limit on ncached. */
|
||||
cache_bin_sz_t ncached_max;
|
||||
};
|
||||
|
||||
typedef struct cache_bin_s cache_bin_t;
|
||||
struct cache_bin_s {
|
||||
/* Min # cached since last GC. */
|
||||
cache_bin_sz_t low_water;
|
||||
/* # of cached objects. */
|
||||
cache_bin_sz_t ncached;
|
||||
/*
|
||||
* ncached and stats are both modified frequently. Let's keep them
|
||||
* close so that they have a higher chance of being on the same
|
||||
* cacheline, thus less write-backs.
|
||||
*/
|
||||
cache_bin_stats_t tstats;
|
||||
/*
|
||||
* Stack of available objects.
|
||||
*
|
||||
* To make use of adjacent cacheline prefetch, the items in the avail
|
||||
* stack goes to higher address for newer allocations. avail points
|
||||
* just above the available space, which means that
|
||||
* avail[-ncached, ... -1] are available items and the lowest item will
|
||||
* be allocated first.
|
||||
*/
|
||||
void **avail;
|
||||
};
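/*
 * Concretely: with ncached == 3 the cached pointers occupy avail[-3],
 * avail[-2] and avail[-1]; cache_bin_alloc_easy() below hands out avail[-3]
 * (dropping ncached to 2), and cache_bin_dalloc_easy() stores a freed
 * pointer back into that same slot (raising ncached to 3 again).
 */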
|
||||
|
||||
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
|
||||
struct cache_bin_array_descriptor_s {
|
||||
/*
|
||||
* The arena keeps a list of the cache bins associated with it, for
|
||||
* stats collection.
|
||||
*/
|
||||
ql_elm(cache_bin_array_descriptor_t) link;
|
||||
/* Pointers to the tcache bins. */
|
||||
cache_bin_t *bins_small;
|
||||
cache_bin_t *bins_large;
|
||||
};
|
||||
|
||||
static inline void
|
||||
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
|
||||
cache_bin_t *bins_small, cache_bin_t *bins_large) {
|
||||
ql_elm_new(descriptor, link);
|
||||
descriptor->bins_small = bins_small;
|
||||
descriptor->bins_large = bins_large;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
|
||||
void *ret;
|
||||
|
||||
bin->ncached--;
|
||||
|
||||
/*
|
||||
* Check for both bin->ncached == 0 and ncached < low_water
|
||||
* in a single branch.
|
||||
*/
|
||||
if (unlikely(bin->ncached <= bin->low_water)) {
|
||||
bin->low_water = bin->ncached;
|
||||
if (bin->ncached == -1) {
|
||||
bin->ncached = 0;
|
||||
*success = false;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* success (instead of ret) should be checked upon the return of this
|
||||
* function. We avoid checking (ret == NULL) because there is never a
|
||||
* null stored on the avail stack (which is unknown to the compiler),
|
||||
* and eagerly checking ret would cause pipeline stall (waiting for the
|
||||
* cacheline).
|
||||
*/
|
||||
*success = true;
|
||||
ret = *(bin->avail - (bin->ncached + 1));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) {
|
||||
if (unlikely(bin->ncached == bin_info->ncached_max)) {
|
||||
return false;
|
||||
}
|
||||
assert(bin->ncached < bin_info->ncached_max);
|
||||
bin->ncached++;
|
||||
*(bin->avail - bin->ncached) = ptr;
|
||||
|
||||
return true;
|
||||
}
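/*
 * Minimal usage sketch (hypothetical helper, not part of the tcache code):
 * pop one object from a bin and, if that succeeds, push it straight back.
 * The bin and its read-only info are assumed to have been set up elsewhere.
 */
static inline void
cache_bin_example_roundtrip(cache_bin_t *bin, cache_bin_info_t *bin_info) {
	bool success;
	void *obj = cache_bin_alloc_easy(bin, &success);

	if (!success) {
		/* Bin empty; a real caller would fall back to the arena. */
		return;
	}
	if (!cache_bin_dalloc_easy(bin, bin_info, obj)) {
		/* Bin full; a real caller would flush to the arena. */
	}
}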
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
|
||||
63
deps/jemalloc/include/jemalloc/internal/chunk.h
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
/*
|
||||
* Size and alignment of memory chunks that are allocated by the OS's virtual
|
||||
* memory system.
|
||||
*/
|
||||
#define LG_CHUNK_DEFAULT 22
|
||||
|
||||
/* Return the chunk address for allocation address a. */
|
||||
#define CHUNK_ADDR2BASE(a) \
|
||||
((void *)((uintptr_t)(a) & ~chunksize_mask))
|
||||
|
||||
/* Return the chunk offset of address a. */
|
||||
#define CHUNK_ADDR2OFFSET(a) \
|
||||
((size_t)((uintptr_t)(a) & chunksize_mask))
|
||||
|
||||
/* Return the smallest chunk multiple that is >= s. */
|
||||
#define CHUNK_CEILING(s) \
|
||||
(((s) + chunksize_mask) & ~chunksize_mask)
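/*
 * Worked example, assuming the default LG_CHUNK_DEFAULT of 22 (4 MiB chunks,
 * chunksize_mask == 0x3fffff): for a == 0x1234567, CHUNK_ADDR2BASE(a) ==
 * 0x1000000 and CHUNK_ADDR2OFFSET(a) == 0x234567, while
 * CHUNK_CEILING(0x400001) == 0x800000.
 */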
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
extern size_t opt_lg_chunk;
|
||||
extern const char *opt_dss;
|
||||
|
||||
/* Protects stats_chunks; currently not used for any other purpose. */
|
||||
extern malloc_mutex_t chunks_mtx;
|
||||
/* Chunk statistics. */
|
||||
extern chunk_stats_t stats_chunks;
|
||||
|
||||
extern rtree_t *chunks_rtree;
|
||||
|
||||
extern size_t chunksize;
|
||||
extern size_t chunksize_mask; /* (chunksize - 1). */
|
||||
extern size_t chunk_npages;
|
||||
extern size_t map_bias; /* Number of arena chunk header pages. */
|
||||
extern size_t arena_maxclass; /* Max size class for arenas. */
|
||||
|
||||
void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
dss_prec_t dss_prec);
|
||||
void chunk_unmap(void *chunk, size_t size);
|
||||
void chunk_dealloc(void *chunk, size_t size, bool unmap);
|
||||
bool chunk_boot(void);
|
||||
void chunk_prefork(void);
|
||||
void chunk_postfork_parent(void);
|
||||
void chunk_postfork_child(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
#include "jemalloc/internal/chunk_dss.h"
|
||||
#include "jemalloc/internal/chunk_mmap.h"
|
||||
38
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef enum {
|
||||
dss_prec_disabled = 0,
|
||||
dss_prec_primary = 1,
|
||||
dss_prec_secondary = 2,
|
||||
|
||||
dss_prec_limit = 3
|
||||
} dss_prec_t;
|
||||
#define DSS_PREC_DEFAULT dss_prec_secondary
|
||||
#define DSS_DEFAULT "secondary"
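/*
 * The precedence controls how sbrk()-based (DSS) allocation is used relative
 * to mmap(): "primary" tries the DSS first, "secondary" (the default above)
 * uses it only as a fallback, and "disabled" never uses it.  See the jemalloc
 * manual entry for opt.dss for the authoritative description.
 */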
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
extern const char *dss_prec_names[];
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
dss_prec_t chunk_dss_prec_get(void);
|
||||
bool chunk_dss_prec_set(dss_prec_t dss_prec);
|
||||
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
|
||||
bool chunk_in_dss(void *chunk);
|
||||
bool chunk_dss_boot(void);
|
||||
void chunk_dss_prefork(void);
|
||||
void chunk_dss_postfork_parent(void);
|
||||
void chunk_dss_postfork_child(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
22
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
bool pages_purge(void *addr, size_t length);
|
||||
|
||||
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
|
||||
bool chunk_dealloc_mmap(void *chunk, size_t size);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
101
deps/jemalloc/include/jemalloc/internal/ckh.h
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
#ifndef JEMALLOC_INTERNAL_CKH_H
|
||||
#define JEMALLOC_INTERNAL_CKH_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/* Cuckoo hashing implementation. Skip to the end for the interface. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* INTERNAL DEFINITIONS -- IGNORE */
|
||||
/******************************************************************************/
|
||||
|
||||
/* Maintain counters used to get an idea of performance. */
|
||||
/* #define CKH_COUNT */
|
||||
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
|
||||
/* #define CKH_VERBOSE */
|
||||
|
||||
/*
|
||||
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
|
||||
* one bucket per L1 cache line.
|
||||
*/
|
||||
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
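/*
 * For example, with 64-byte L1 cache lines (LG_CACHELINE == 6) and 8-byte
 * pointers (LG_SIZEOF_PTR == 3), LG_CKH_BUCKET_CELLS == 2: four cells per
 * bucket, and since each cell below holds two pointers (16 bytes), a bucket
 * occupies exactly one 64-byte cache line.
 */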
|
||||
|
||||
/* Typedefs to allow easy function pointer passing. */
|
||||
typedef void ckh_hash_t (const void *, size_t[2]);
|
||||
typedef bool ckh_keycomp_t (const void *, const void *);
|
||||
|
||||
/* Hash table cell. */
|
||||
typedef struct {
|
||||
const void *key;
|
||||
const void *data;
|
||||
} ckhc_t;
|
||||
|
||||
/* The hash table itself. */
|
||||
typedef struct {
|
||||
#ifdef CKH_COUNT
|
||||
/* Counters used to get an idea of performance. */
|
||||
uint64_t ngrows;
|
||||
uint64_t nshrinks;
|
||||
uint64_t nshrinkfails;
|
||||
uint64_t ninserts;
|
||||
uint64_t nrelocs;
|
||||
#endif
|
||||
|
||||
/* Used for pseudo-random number generation. */
|
||||
uint64_t prng_state;
|
||||
|
||||
/* Total number of items. */
|
||||
size_t count;
|
||||
|
||||
/*
|
||||
* Minimum and current number of hash table buckets. There are
|
||||
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
|
||||
*/
|
||||
unsigned lg_minbuckets;
|
||||
unsigned lg_curbuckets;
|
||||
|
||||
/* Hash and comparison functions. */
|
||||
ckh_hash_t *hash;
|
||||
ckh_keycomp_t *keycomp;
|
||||
|
||||
/* Hash table with 2^lg_curbuckets buckets. */
|
||||
ckhc_t *tab;
|
||||
} ckh_t;
|
||||
|
||||
/******************************************************************************/
|
||||
/* BEGIN PUBLIC API */
|
||||
/******************************************************************************/
|
||||
|
||||
/* Lifetime management. Minitems is the initial capacity. */
|
||||
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp);
|
||||
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/* Get the number of elements in the set. */
|
||||
size_t ckh_count(ckh_t *ckh);
|
||||
|
||||
/*
|
||||
* To iterate over the elements in the table, initialize *tabind to 0 and call
|
||||
* this function until it returns true. Each call that returns false will
|
||||
* update *key and *data to the next element in the table, assuming the pointers
|
||||
* are non-NULL.
|
||||
*/
|
||||
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
|
||||
|
||||
/*
|
||||
* Basic hash table operations -- insert, removal, lookup. For ckh_remove and
|
||||
* ckh_search, key or data can be NULL. The hash-table only stores pointers to
|
||||
* the key and value, and doesn't do any lifetime management.
|
||||
*/
|
||||
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
|
||||
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
||||
void **data);
|
||||
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
|
||||
|
||||
/* Some useful hash and comparison functions for strings and pointers. */
|
||||
void ckh_string_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_string_keycomp(const void *k1, const void *k2);
|
||||
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
|
||||
bool ckh_pointer_keycomp(const void *k1, const void *k2);
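/*
 * Illustrative usage sketch (hypothetical caller; the tsd_t is assumed to be
 * supplied by the surrounding code).  Return values follow the convention
 * documented for ckh_iter above: false means "success / element produced".
 */
static inline void
ckh_example(tsd_t *tsd) {
	ckh_t ckh;
	size_t tabind;
	void *key, *data;

	if (ckh_new(tsd, &ckh, 4, ckh_string_hash, ckh_string_keycomp)) {
		return;
	}
	if (!ckh_insert(tsd, &ckh, "answer", (void *)(uintptr_t)42)) {
		/* Walk every element currently in the table. */
		for (tabind = 0; !ckh_iter(&ckh, &tabind, &key, &data);) {
			/* key == "answer", data == (void *)42 on the only hit. */
		}
	}
	ckh_delete(tsd, &ckh);
}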
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CKH_H */
|
||||
134
deps/jemalloc/include/jemalloc/internal/ctl.h
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
#ifndef JEMALLOC_INTERNAL_CTL_H
|
||||
#define JEMALLOC_INTERNAL_CTL_H
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
|
||||
/* Maximum ctl tree depth. */
|
||||
#define CTL_MAX_DEPTH 7
|
||||
|
||||
typedef struct ctl_node_s {
|
||||
bool named;
|
||||
} ctl_node_t;
|
||||
|
||||
typedef struct ctl_named_node_s {
|
||||
ctl_node_t node;
|
||||
const char *name;
|
||||
/* If (nchildren == 0), this is a terminal node. */
|
||||
size_t nchildren;
|
||||
const ctl_node_t *children;
|
||||
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
|
||||
size_t);
|
||||
} ctl_named_node_t;
|
||||
|
||||
typedef struct ctl_indexed_node_s {
|
||||
struct ctl_node_s node;
|
||||
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
|
||||
size_t);
|
||||
} ctl_indexed_node_t;
|
||||
|
||||
typedef struct ctl_arena_stats_s {
|
||||
arena_stats_t astats;
|
||||
|
||||
/* Aggregate stats for small size classes, based on bin stats. */
|
||||
size_t allocated_small;
|
||||
uint64_t nmalloc_small;
|
||||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
uint64_t nfills_small;
|
||||
uint64_t nflushes_small;
|
||||
|
||||
bin_stats_t bstats[SC_NBINS];
|
||||
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
|
||||
arena_stats_extents_t estats[SC_NPSIZES];
|
||||
} ctl_arena_stats_t;
|
||||
|
||||
typedef struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t metadata;
|
||||
size_t metadata_thp;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
size_t retained;
|
||||
|
||||
background_thread_stats_t background_thread;
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
|
||||
} ctl_stats_t;
|
||||
|
||||
typedef struct ctl_arena_s ctl_arena_t;
|
||||
struct ctl_arena_s {
|
||||
unsigned arena_ind;
|
||||
bool initialized;
|
||||
ql_elm(ctl_arena_t) destroyed_link;
|
||||
|
||||
/* Basic stats, supported even if !config_stats. */
|
||||
unsigned nthreads;
|
||||
const char *dss;
|
||||
ssize_t dirty_decay_ms;
|
||||
ssize_t muzzy_decay_ms;
|
||||
size_t pactive;
|
||||
size_t pdirty;
|
||||
size_t pmuzzy;
|
||||
|
||||
/* NULL if !config_stats. */
|
||||
ctl_arena_stats_t *astats;
|
||||
};
|
||||
|
||||
typedef struct ctl_arenas_s {
|
||||
uint64_t epoch;
|
||||
unsigned narenas;
|
||||
ql_head(ctl_arena_t) destroyed;
|
||||
|
||||
/*
|
||||
* Element 0 corresponds to merged stats for extant arenas (accessed via
|
||||
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
|
||||
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
|
||||
* remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
|
||||
*/
|
||||
ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
|
||||
} ctl_arenas_t;
|
||||
|
||||
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen);
|
||||
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
|
||||
|
||||
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen);
|
||||
bool ctl_boot(void);
|
||||
void ctl_prefork(tsdn_t *tsdn);
|
||||
void ctl_postfork_parent(tsdn_t *tsdn);
|
||||
void ctl_postfork_child(tsdn_t *tsdn);
|
||||
|
||||
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
|
||||
!= 0) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
|
||||
name); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlnametomib(name, mibp, miblenp) do { \
|
||||
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
|
||||
malloc_printf("<jemalloc>: Failure in " \
|
||||
"xmallctlnametomib(\"%s\", ...)\n", name); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
|
||||
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
|
||||
newlen) != 0) { \
|
||||
malloc_write( \
|
||||
"<jemalloc>: Failure in xmallctlbymib()\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
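/*
 * Illustrative only: reading a statistic through the abort-on-failure wrapper
 * above.  "stats.allocated" is a plausible mallctl name; the authoritative
 * list lives in the jemalloc manual, not in this header.
 */
static inline size_t
ctl_example_read_allocated(void) {
	size_t allocated;
	size_t sz = sizeof(allocated);

	xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return allocated;
}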
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CTL_H */
|
||||
41
deps/jemalloc/include/jemalloc/internal/div.h
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
#ifndef JEMALLOC_INTERNAL_DIV_H
|
||||
#define JEMALLOC_INTERNAL_DIV_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
|
||||
* This module does the division that computes the index of a region in a slab,
|
||||
* given its offset relative to the base.
|
||||
* That is, given a divisor d, an n = i * d (all integers), we'll return i.
|
||||
* We do some pre-computation to do this more quickly than a CPU division
|
||||
* instruction.
|
||||
* We bound n < 2^32, and don't support dividing by one.
|
||||
*/
|
||||
|
||||
typedef struct div_info_s div_info_t;
|
||||
struct div_info_s {
|
||||
uint32_t magic;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
size_t d;
|
||||
#endif
|
||||
};
|
||||
|
||||
void div_init(div_info_t *div_info, size_t divisor);
|
||||
|
||||
static inline size_t
|
||||
div_compute(div_info_t *div_info, size_t n) {
|
||||
assert(n <= (uint32_t)-1);
|
||||
/*
|
||||
* This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
|
||||
* the compilers I tried were all smart enough to turn this into the
|
||||
* appropriate "get the high 32 bits of the result of a multiply" (e.g.
|
||||
* mul; mov edx eax; on x86, umull on arm, etc.).
|
||||
*/
|
||||
size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
assert(i * div_info->d == n);
|
||||
#endif
|
||||
return i;
|
||||
}
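/*
 * Worked example: for d == 3, a magic of ceil(2^32 / 3) == 1431655766 gives
 * div_compute(&info, 12) == (12 * 1431655766) >> 32 == 4, as expected.
 *
 * A minimal div_init sketch under that same assumption (the real
 * implementation lives in div.c and may differ):
 */
static inline void
div_init_sketch(div_info_t *div_info, size_t divisor) {
	assert(divisor > 1);	/* Dividing by one is unsupported. */
	uint64_t two_to_32 = (uint64_t)1 << 32;
	uint32_t magic = (uint32_t)(two_to_32 / divisor);
	if (two_to_32 % divisor != 0) {
		magic++;	/* magic == ceil(2^32 / divisor). */
	}
	div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
	div_info->d = divisor;
#endif
}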
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DIV_H */
|
||||
486
deps/jemalloc/include/jemalloc/internal/emitter.h
vendored
Normal file
@@ -0,0 +1,486 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EMITTER_H
|
||||
#define JEMALLOC_INTERNAL_EMITTER_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
|
||||
typedef enum emitter_output_e emitter_output_t;
|
||||
enum emitter_output_e {
|
||||
emitter_output_json,
|
||||
emitter_output_table
|
||||
};
|
||||
|
||||
typedef enum emitter_justify_e emitter_justify_t;
|
||||
enum emitter_justify_e {
|
||||
emitter_justify_left,
|
||||
emitter_justify_right,
|
||||
/* Not for users; just to pass to internal functions. */
|
||||
emitter_justify_none
|
||||
};
|
||||
|
||||
typedef enum emitter_type_e emitter_type_t;
|
||||
enum emitter_type_e {
|
||||
emitter_type_bool,
|
||||
emitter_type_int,
|
||||
emitter_type_unsigned,
|
||||
emitter_type_uint32,
|
||||
emitter_type_uint64,
|
||||
emitter_type_size,
|
||||
emitter_type_ssize,
|
||||
emitter_type_string,
|
||||
/*
|
||||
* A title is a column title in a table; it's just a string, but it's
|
||||
* not quoted.
|
||||
*/
|
||||
emitter_type_title,
|
||||
};
|
||||
|
||||
typedef struct emitter_col_s emitter_col_t;
|
||||
struct emitter_col_s {
|
||||
/* Filled in by the user. */
|
||||
emitter_justify_t justify;
|
||||
int width;
|
||||
emitter_type_t type;
|
||||
union {
|
||||
bool bool_val;
|
||||
int int_val;
|
||||
unsigned unsigned_val;
|
||||
uint32_t uint32_val;
|
||||
uint32_t uint32_t_val;
|
||||
uint64_t uint64_val;
|
||||
uint64_t uint64_t_val;
|
||||
size_t size_val;
|
||||
ssize_t ssize_val;
|
||||
const char *str_val;
|
||||
};
|
||||
|
||||
/* Filled in by initialization. */
|
||||
ql_elm(emitter_col_t) link;
|
||||
};
|
||||
|
||||
typedef struct emitter_row_s emitter_row_t;
|
||||
struct emitter_row_s {
|
||||
ql_head(emitter_col_t) cols;
|
||||
};
|
||||
|
||||
typedef struct emitter_s emitter_t;
|
||||
struct emitter_s {
|
||||
emitter_output_t output;
|
||||
/* The output information. */
|
||||
void (*write_cb)(void *, const char *);
|
||||
void *cbopaque;
|
||||
int nesting_depth;
|
||||
/* True if we've already emitted a value at the given depth. */
|
||||
bool item_at_depth;
|
||||
/* True if we emitted a key and will emit corresponding value next. */
|
||||
bool emitted_key;
|
||||
};
|
||||
|
||||
/* Internal convenience function. Write to the emitter the given string. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_printf(emitter_t *emitter, const char *format, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
static inline const char * JEMALLOC_FORMAT_ARG(3)
|
||||
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
|
||||
emitter_justify_t justify, int width) {
|
||||
size_t written;
|
||||
fmt_specifier++;
|
||||
if (justify == emitter_justify_none) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%s", fmt_specifier);
|
||||
} else if (justify == emitter_justify_left) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%-%d%s", width, fmt_specifier);
|
||||
} else {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%d%s", width, fmt_specifier);
|
||||
}
|
||||
/* Only happens in case of bad format string, which *we* choose. */
|
||||
assert(written < out_size);
|
||||
return out_fmt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal. Emit the given value type in the relevant encoding (so that the
|
||||
* bool true gets mapped to json "true", but the string "true" gets mapped to
|
||||
* json "\"true\"", for instance.
|
||||
*
|
||||
* Width is ignored if justify is emitter_justify_none.
|
||||
*/
|
||||
static inline void
|
||||
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
size_t str_written;
|
||||
#define BUF_SIZE 256
|
||||
#define FMT_SIZE 10
|
||||
/*
|
||||
* We dynamically generate a format string to emit, to let us use the
|
||||
* snprintf machinery. This is kinda hacky, but gets the job done
|
||||
* quickly without having to think about the various snprintf edge
|
||||
* cases.
|
||||
*/
|
||||
char fmt[FMT_SIZE];
|
||||
char buf[BUF_SIZE];
|
||||
|
||||
#define EMIT_SIMPLE(type, format) \
|
||||
emitter_printf(emitter, \
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
|
||||
*(const type *)value);
|
||||
|
||||
switch (value_type) {
|
||||
case emitter_type_bool:
|
||||
emitter_printf(emitter,
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
|
||||
*(const bool *)value ? "true" : "false");
|
||||
break;
|
||||
case emitter_type_int:
|
||||
EMIT_SIMPLE(int, "%d")
|
||||
break;
|
||||
case emitter_type_unsigned:
|
||||
EMIT_SIMPLE(unsigned, "%u")
|
||||
break;
|
||||
case emitter_type_ssize:
|
||||
EMIT_SIMPLE(ssize_t, "%zd")
|
||||
break;
|
||||
case emitter_type_size:
|
||||
EMIT_SIMPLE(size_t, "%zu")
|
||||
break;
|
||||
case emitter_type_string:
|
||||
str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
|
||||
*(const char *const *)value);
|
||||
/*
|
||||
* We control the strings we output; we shouldn't get anything
|
||||
* anywhere near the fmt size.
|
||||
*/
|
||||
assert(str_written < BUF_SIZE);
|
||||
emitter_printf(emitter,
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
|
||||
break;
|
||||
case emitter_type_uint32:
|
||||
EMIT_SIMPLE(uint32_t, "%" FMTu32)
|
||||
break;
|
||||
case emitter_type_uint64:
|
||||
EMIT_SIMPLE(uint64_t, "%" FMTu64)
|
||||
break;
|
||||
case emitter_type_title:
|
||||
EMIT_SIMPLE(char *const, "%s");
|
||||
break;
|
||||
default:
|
||||
unreachable();
|
||||
}
|
||||
#undef BUF_SIZE
|
||||
#undef FMT_SIZE
|
||||
}
|
||||
|
||||
|
||||
/* Internal functions. In json mode, tracks nesting state. */
|
||||
static inline void
|
||||
emitter_nest_inc(emitter_t *emitter) {
|
||||
emitter->nesting_depth++;
|
||||
emitter->item_at_depth = false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_nest_dec(emitter_t *emitter) {
|
||||
emitter->nesting_depth--;
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_indent(emitter_t *emitter) {
|
||||
int amount = emitter->nesting_depth;
|
||||
const char *indent_str;
|
||||
if (emitter->output == emitter_output_json) {
|
||||
indent_str = "\t";
|
||||
} else {
|
||||
amount *= 2;
|
||||
indent_str = " ";
|
||||
}
|
||||
for (int i = 0; i < amount; i++) {
|
||||
emitter_printf(emitter, "%s", indent_str);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_key_prefix(emitter_t *emitter) {
|
||||
if (emitter->emitted_key) {
|
||||
emitter->emitted_key = false;
|
||||
return;
|
||||
}
|
||||
emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
|
||||
emitter_indent(emitter);
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* Public functions for emitter_t. */
|
||||
|
||||
static inline void
|
||||
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
|
||||
void (*write_cb)(void *, const char *), void *cbopaque) {
|
||||
emitter->output = emitter_output;
|
||||
emitter->write_cb = write_cb;
|
||||
emitter->cbopaque = cbopaque;
|
||||
emitter->item_at_depth = false;
|
||||
emitter->emitted_key = false;
|
||||
emitter->nesting_depth = 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* JSON public API. */
|
||||
|
||||
/*
|
||||
* Emits a key (e.g. as appears in an object). The next json entity emitted will
|
||||
* be the corresponding value.
|
||||
*/
|
||||
static inline void
|
||||
emitter_json_key(emitter_t *emitter, const char *json_key) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "\"%s\": ", json_key);
|
||||
emitter->emitted_key = true;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
|
||||
const void *value) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_value. */
|
||||
static inline void
|
||||
emitter_json_kv(emitter_t *emitter, const char *json_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_value(emitter, value_type, value);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_array_begin(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "[");
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
|
||||
static inline void
|
||||
emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_array_begin(emitter);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_array_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "]");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_object_begin(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
|
||||
static inline void
|
||||
emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_object_begin(emitter);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_object_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "}");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Table public API. */
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s\n", table_key);
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_nest_dec(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_kv_note(emitter_t *emitter, const char *table_key,
|
||||
emitter_type_t value_type, const void *value,
|
||||
const char *table_note_key, emitter_type_t table_note_value_type,
|
||||
const void *table_note_value) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s: ", table_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
if (table_note_key != NULL) {
|
||||
emitter_printf(emitter, " (%s: ", table_note_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
table_note_value_type, table_note_value);
|
||||
emitter_printf(emitter, ")");
|
||||
}
|
||||
emitter_printf(emitter, "\n");
|
||||
}
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_kv(emitter_t *emitter, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
|
||||
emitter_type_bool, NULL);
|
||||
}
|
||||
|
||||
|
||||
/* Write to the emitter the given string, but only in table mode. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
va_list ap;
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
|
||||
if (emitter->output != emitter_output_table) {
|
||||
return;
|
||||
}
|
||||
emitter_col_t *col;
|
||||
ql_foreach(col, &row->cols, link) {
|
||||
emitter_print_value(emitter, col->justify, col->width,
|
||||
col->type, (const void *)&col->bool_val);
|
||||
}
|
||||
emitter_table_printf(emitter, "\n");
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_row_init(emitter_row_t *row) {
|
||||
ql_new(&row->cols);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
|
||||
ql_elm_new(col, link);
|
||||
ql_tail_insert(&row->cols, col, link);
|
||||
}
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Generalized public API. Emits using either JSON or table, according to
|
||||
* settings in the emitter_t. */
|
||||
|
||||
/*
|
||||
* Note emits a different kv pair as well, but only in table mode. Omits the
|
||||
* note if table_note_key is NULL.
|
||||
*/
|
||||
static inline void
|
||||
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value,
|
||||
const char *table_note_key, emitter_type_t table_note_value_type,
|
||||
const void *table_note_value) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_value(emitter, value_type, value);
|
||||
} else {
|
||||
emitter_table_kv_note(emitter, table_key, value_type, value,
|
||||
table_note_key, table_note_value_type, table_note_value);
|
||||
}
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
|
||||
emitter_type_bool, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_begin(emitter_t *emitter, const char *json_key,
|
||||
const char *table_header) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key(emitter, json_key);
|
||||
emitter_json_object_begin(emitter);
|
||||
} else {
|
||||
emitter_table_dict_begin(emitter, table_header);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_object_end(emitter);
|
||||
} else {
|
||||
emitter_table_dict_end(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_begin(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth == 0);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
} else {
|
||||
/*
|
||||
* This guarantees that we always call write_cb at least once.
|
||||
* This is useful if some invariant is established by each call
|
||||
* to write_cb, but doesn't hold initially: e.g., some buffer
|
||||
* holds a null-terminated string.
|
||||
*/
|
||||
emitter_printf(emitter, "%s", "");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth == 1);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n}\n");
|
||||
}
|
||||
}
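/*
 * Illustrative usage sketch (hypothetical caller): emit {"version": "5.x"}
 * in JSON mode, writing through malloc_write().  The emitter_example_write
 * callback and the emitted key are assumptions for the example only.
 */
static inline void
emitter_example_write(void *cbopaque, const char *s) {
	(void)cbopaque;
	malloc_write(s);
}

static inline void
emitter_example(void) {
	emitter_t emitter;
	const char *version = "5.x";

	emitter_init(&emitter, emitter_output_json, emitter_example_write,
	    NULL);
	emitter_begin(&emitter);
	emitter_kv(&emitter, "version", "Version", emitter_type_string,
	    &version);
	emitter_end(&emitter);
}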
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EMITTER_H */
|
||||
46
deps/jemalloc/include/jemalloc/internal/extent.h
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct extent_node_s extent_node_t;
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
/* Tree of extents. */
|
||||
struct extent_node_s {
|
||||
/* Linkage for the size/address-ordered tree. */
|
||||
rb_node(extent_node_t) link_szad;
|
||||
|
||||
/* Linkage for the address-ordered tree. */
|
||||
rb_node(extent_node_t) link_ad;
|
||||
|
||||
/* Profile counters, used for huge objects. */
|
||||
prof_ctx_t *prof_ctx;
|
||||
|
||||
/* Pointer to the extent that this tree node is responsible for. */
|
||||
void *addr;
|
||||
|
||||
/* Total region size. */
|
||||
size_t size;
|
||||
|
||||
/* True if zero-filled; used by chunk recycling code. */
|
||||
bool zeroed;
|
||||
};
|
||||
typedef rb_tree(extent_node_t) extent_tree_t;
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
|
||||
|
||||
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
26
deps/jemalloc/include/jemalloc/internal/extent_dss.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_DSS_H
|
||||
|
||||
typedef enum {
|
||||
dss_prec_disabled = 0,
|
||||
dss_prec_primary = 1,
|
||||
dss_prec_secondary = 2,
|
||||
|
||||
dss_prec_limit = 3
|
||||
} dss_prec_t;
|
||||
#define DSS_PREC_DEFAULT dss_prec_secondary
|
||||
#define DSS_DEFAULT "secondary"
|
||||
|
||||
extern const char *dss_prec_names[];
|
||||
|
||||
extern const char *opt_dss;
|
||||
|
||||
dss_prec_t extent_dss_prec_get(void);
|
||||
bool extent_dss_prec_set(dss_prec_t dss_prec);
|
||||
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool *commit);
|
||||
bool extent_in_dss(void *addr);
|
||||
bool extent_dss_mergeable(void *addr_a, void *addr_b);
|
||||
void extent_dss_boot(void);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
|
||||
83
deps/jemalloc/include/jemalloc/internal/extent_externs.h
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_pool.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
extern size_t opt_lg_extent_max_active_fit;
|
||||
|
||||
extern rtree_t extents_rtree;
|
||||
extern const extent_hooks_t extent_hooks_default;
|
||||
extern mutex_pool_t extent_mutex_pool;
|
||||
|
||||
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
|
||||
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
|
||||
|
||||
extent_hooks_t *extent_hooks_get(arena_t *arena);
|
||||
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
|
||||
extent_hooks_t *extent_hooks);
|
||||
|
||||
#ifdef JEMALLOC_JET
|
||||
size_t extent_size_quantize_floor(size_t size);
|
||||
size_t extent_size_quantize_ceil(size_t size);
|
||||
#endif
|
||||
|
||||
ph_proto(, extent_avail_, extent_tree_t, extent_t)
|
||||
ph_proto(, extent_heap_, extent_heap_t, extent_t)
|
||||
|
||||
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
|
||||
bool delay_coalesce);
|
||||
extent_state_t extents_state_get(const extents_t *extents);
|
||||
size_t extents_npages_get(extents_t *extents);
|
||||
/* Get the number of extents in the given page size index. */
|
||||
size_t extents_nextents_get(extents_t *extents, pszind_t ind);
|
||||
/* Get the sum total bytes of the extents in the given page size index. */
|
||||
size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
|
||||
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
|
||||
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
|
||||
bool *zero, bool *commit);
|
||||
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
|
||||
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
|
||||
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
|
||||
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
|
||||
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
|
||||
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
|
||||
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
|
||||
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
|
||||
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent);
|
||||
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent);
|
||||
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
|
||||
size_t length);
|
||||
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
|
||||
size_t length);
|
||||
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
|
||||
size_t length);
|
||||
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
|
||||
size_t length);
|
||||
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
|
||||
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
|
||||
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
|
||||
|
||||
bool extent_boot(void);
|
||||
|
||||
void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
|
||||
size_t *nfree, size_t *nregs, size_t *size);
|
||||
void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
|
||||
size_t *nfree, size_t *nregs, size_t *size,
|
||||
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
|
||||
501
deps/jemalloc/include/jemalloc/internal/extent_inlines.h
vendored
Normal file
@@ -0,0 +1,501 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_pool.h"
|
||||
#include "jemalloc/internal/pages.h"
|
||||
#include "jemalloc/internal/prng.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
|
||||
static inline void
|
||||
extent_lock(tsdn_t *tsdn, extent_t *extent) {
|
||||
assert(extent != NULL);
|
||||
mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
|
||||
assert(extent != NULL);
|
||||
mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
|
||||
assert(extent1 != NULL && extent2 != NULL);
|
||||
mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
|
||||
(uintptr_t)extent2);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
|
||||
assert(extent1 != NULL && extent2 != NULL);
|
||||
mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
|
||||
(uintptr_t)extent2);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
extent_arena_ind_get(const extent_t *extent) {
|
||||
unsigned arena_ind = (unsigned)((extent->e_bits &
|
||||
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
|
||||
assert(arena_ind < MALLOCX_ARENA_LIMIT);
|
||||
|
||||
return arena_ind;
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
extent_arena_get(const extent_t *extent) {
|
||||
unsigned arena_ind = extent_arena_ind_get(extent);
|
||||
|
||||
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline szind_t
|
||||
extent_szind_get_maybe_invalid(const extent_t *extent) {
|
||||
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
|
||||
EXTENT_BITS_SZIND_SHIFT);
|
||||
assert(szind <= SC_NSIZES);
|
||||
return szind;
|
||||
}
|
||||
|
||||
static inline szind_t
|
||||
extent_szind_get(const extent_t *extent) {
|
||||
szind_t szind = extent_szind_get_maybe_invalid(extent);
|
||||
assert(szind < SC_NSIZES); /* Never call when "invalid". */
|
||||
return szind;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
extent_usize_get(const extent_t *extent) {
|
||||
return sz_index2size(extent_szind_get(extent));
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
extent_binshard_get(const extent_t *extent) {
|
||||
unsigned binshard = (unsigned)((extent->e_bits &
|
||||
EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
|
||||
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
||||
return binshard;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
extent_sn_get(const extent_t *extent) {
|
||||
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
|
||||
EXTENT_BITS_SN_SHIFT);
|
||||
}
|
||||
|
||||
static inline extent_state_t
|
||||
extent_state_get(const extent_t *extent) {
|
||||
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
|
||||
EXTENT_BITS_STATE_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_zeroed_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
|
||||
EXTENT_BITS_ZEROED_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_committed_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
|
||||
EXTENT_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_dumpable_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
|
||||
EXTENT_BITS_DUMPABLE_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_slab_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
|
||||
EXTENT_BITS_SLAB_SHIFT);
|
||||
}
|
||||
|
||||
static inline unsigned
|
||||
extent_nfree_get(const extent_t *extent) {
|
||||
assert(extent_slab_get(extent));
|
||||
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
|
||||
EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
extent_base_get(const extent_t *extent) {
|
||||
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
|
||||
!extent_slab_get(extent));
|
||||
return PAGE_ADDR2BASE(extent->e_addr);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
extent_addr_get(const extent_t *extent) {
|
||||
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
|
||||
!extent_slab_get(extent));
|
||||
return extent->e_addr;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
extent_size_get(const extent_t *extent) {
|
||||
return (extent->e_size_esn & EXTENT_SIZE_MASK);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
extent_esn_get(const extent_t *extent) {
|
||||
return (extent->e_size_esn & EXTENT_ESN_MASK);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
extent_bsize_get(const extent_t *extent) {
|
||||
return extent->e_bsize;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
extent_before_get(const extent_t *extent) {
|
||||
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
extent_last_get(const extent_t *extent) {
|
||||
return (void *)((uintptr_t)extent_base_get(extent) +
|
||||
extent_size_get(extent) - PAGE);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
extent_past_get(const extent_t *extent) {
|
||||
return (void *)((uintptr_t)extent_base_get(extent) +
|
||||
extent_size_get(extent));
|
||||
}
|
||||
|
||||
static inline arena_slab_data_t *
|
||||
extent_slab_data_get(extent_t *extent) {
|
||||
assert(extent_slab_get(extent));
|
||||
return &extent->e_slab_data;
|
||||
}
|
||||
|
||||
static inline const arena_slab_data_t *
|
||||
extent_slab_data_get_const(const extent_t *extent) {
|
||||
assert(extent_slab_get(extent));
|
||||
return &extent->e_slab_data;
|
||||
}
|
||||
|
||||
static inline prof_tctx_t *
|
||||
extent_prof_tctx_get(const extent_t *extent) {
|
||||
return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
static inline nstime_t
|
||||
extent_prof_alloc_time_get(const extent_t *extent) {
|
||||
return extent->e_alloc_time;
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_arena_set(extent_t *extent, arena_t *arena) {
|
||||
unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
|
||||
MALLOCX_ARENA_BITS) - 1);
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
|
||||
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_binshard_set(extent_t *extent, unsigned binshard) {
|
||||
/* The assertion assumes szind is set already. */
|
||||
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
|
||||
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_addr_set(extent_t *extent, void *addr) {
|
||||
extent->e_addr = addr;
|
||||
}
|
||||
|
||||
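/*
 * Example for the randomization below: assuming 4 KiB pages (LG_PAGE == 12)
 * and a 64-byte CACHELINE, an alignment of 64 gives lg_range == 6, so the
 * extent's start address is nudged by one of 64 possible cacheline-aligned
 * offsets within its first page.
 */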
static inline void
|
||||
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
|
||||
assert(extent_base_get(extent) == extent_addr_get(extent));
|
||||
|
||||
if (alignment < PAGE) {
|
||||
unsigned lg_range = LG_PAGE -
|
||||
lg_floor(CACHELINE_CEILING(alignment));
|
||||
size_t r;
|
||||
if (!tsdn_null(tsdn)) {
|
||||
tsd_t *tsd = tsdn_tsd(tsdn);
|
||||
r = (size_t)prng_lg_range_u64(
|
||||
tsd_offset_statep_get(tsd), lg_range);
|
||||
} else {
|
||||
r = prng_lg_range_zu(
|
||||
&extent_arena_get(extent)->offset_state,
|
||||
lg_range, true);
|
||||
}
|
||||
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
|
||||
lg_range);
|
||||
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
|
||||
random_offset);
|
||||
assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
|
||||
extent->e_addr);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_size_set(extent_t *extent, size_t size) {
|
||||
assert((size & ~EXTENT_SIZE_MASK) == 0);
|
||||
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_esn_set(extent_t *extent, size_t esn) {
|
||||
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
|
||||
EXTENT_ESN_MASK);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_bsize_set(extent_t *extent, size_t bsize) {
|
||||
extent->e_bsize = bsize;
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_szind_set(extent_t *extent, szind_t szind) {
|
||||
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
|
||||
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_nfree_set(extent_t *extent, unsigned nfree) {
|
||||
assert(extent_slab_get(extent));
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
|
||||
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
|
||||
/* The assertion assumes szind is set already. */
|
||||
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
||||
extent->e_bits = (extent->e_bits &
|
||||
(~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
|
||||
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
|
||||
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_nfree_inc(extent_t *extent) {
|
||||
assert(extent_slab_get(extent));
|
||||
extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_nfree_dec(extent_t *extent) {
|
||||
assert(extent_slab_get(extent));
|
||||
extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_nfree_sub(extent_t *extent, uint64_t n) {
|
||||
assert(extent_slab_get(extent));
|
||||
extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_sn_set(extent_t *extent, size_t sn) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
|
||||
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_state_set(extent_t *extent, extent_state_t state) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
|
||||
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_zeroed_set(extent_t *extent, bool zeroed) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
|
||||
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_committed_set(extent_t *extent, bool committed) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
|
||||
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_dumpable_set(extent_t *extent, bool dumpable) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
|
||||
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_slab_set(extent_t *extent, bool slab) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
|
||||
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
|
||||
atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
|
||||
nstime_copy(&extent->e_alloc_time, &t);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_is_head_get(extent_t *extent) {
|
||||
if (maps_coalesce) {
|
||||
not_reached();
|
||||
}
|
||||
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
|
||||
EXTENT_BITS_IS_HEAD_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_is_head_set(extent_t *extent, bool is_head) {
|
||||
if (maps_coalesce) {
|
||||
not_reached();
|
||||
}
|
||||
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
|
||||
((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
||||
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
|
||||
bool committed, bool dumpable, extent_head_state_t is_head) {
|
||||
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
|
||||
|
||||
extent_arena_set(extent, arena);
|
||||
extent_addr_set(extent, addr);
|
||||
extent_size_set(extent, size);
|
||||
extent_slab_set(extent, slab);
|
||||
extent_szind_set(extent, szind);
|
||||
extent_sn_set(extent, sn);
|
||||
extent_state_set(extent, state);
|
||||
extent_zeroed_set(extent, zeroed);
|
||||
extent_committed_set(extent, committed);
|
||||
extent_dumpable_set(extent, dumpable);
|
||||
ql_elm_new(extent, ql_link);
|
||||
if (!maps_coalesce) {
|
||||
extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
|
||||
false);
|
||||
}
|
||||
if (config_prof) {
|
||||
extent_prof_tctx_set(extent, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
|
||||
extent_arena_set(extent, NULL);
|
||||
extent_addr_set(extent, addr);
|
||||
extent_bsize_set(extent, bsize);
|
||||
extent_slab_set(extent, false);
|
||||
extent_szind_set(extent, SC_NSIZES);
|
||||
extent_sn_set(extent, sn);
|
||||
extent_state_set(extent, extent_state_active);
|
||||
extent_zeroed_set(extent, true);
|
||||
extent_committed_set(extent, true);
|
||||
extent_dumpable_set(extent, true);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_init(extent_list_t *list) {
|
||||
ql_new(list);
|
||||
}
|
||||
|
||||
static inline extent_t *
|
||||
extent_list_first(const extent_list_t *list) {
|
||||
return ql_first(list);
|
||||
}
|
||||
|
||||
static inline extent_t *
|
||||
extent_list_last(const extent_list_t *list) {
|
||||
return ql_last(list, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_append(extent_list_t *list, extent_t *extent) {
|
||||
ql_tail_insert(list, extent, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_prepend(extent_list_t *list, extent_t *extent) {
|
||||
ql_head_insert(list, extent, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_replace(extent_list_t *list, extent_t *to_remove,
|
||||
extent_t *to_insert) {
|
||||
ql_after_insert(to_remove, to_insert, ql_link);
|
||||
ql_remove(list, to_remove, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_remove(extent_list_t *list, extent_t *extent) {
|
||||
ql_remove(list, extent, ql_link);
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_sn_comp(const extent_t *a, const extent_t *b) {
|
||||
size_t a_sn = extent_sn_get(a);
|
||||
size_t b_sn = extent_sn_get(b);
|
||||
|
||||
return (a_sn > b_sn) - (a_sn < b_sn);
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_esn_comp(const extent_t *a, const extent_t *b) {
|
||||
size_t a_esn = extent_esn_get(a);
|
||||
size_t b_esn = extent_esn_get(b);
|
||||
|
||||
return (a_esn > b_esn) - (a_esn < b_esn);
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_ad_comp(const extent_t *a, const extent_t *b) {
|
||||
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
|
||||
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
|
||||
|
||||
return (a_addr > b_addr) - (a_addr < b_addr);
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_ead_comp(const extent_t *a, const extent_t *b) {
|
||||
uintptr_t a_eaddr = (uintptr_t)a;
|
||||
uintptr_t b_eaddr = (uintptr_t)b;
|
||||
|
||||
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_snad_comp(const extent_t *a, const extent_t *b) {
|
||||
int ret;
|
||||
|
||||
ret = extent_sn_comp(a, b);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = extent_ad_comp(a, b);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
extent_esnead_comp(const extent_t *a, const extent_t *b) {
|
||||
int ret;
|
||||
|
||||
ret = extent_esn_comp(a, b);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = extent_ead_comp(a, b);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
|
||||
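The *_comp helpers above all follow one pattern: compare a primary key with the (a > b) - (a < b) idiom, and fall back to a secondary key only on a tie. A minimal standalone sketch of that pattern for serial-number-then-address ordering; the demo_* names are illustrative and not part of the header:

#include <stdint.h>

/* Illustrative only: same (a > b) - (a < b) idiom used by extent_sn_comp(). */
static int
demo_key_comp(uint64_t a, uint64_t b) {
	return (a > b) - (a < b);
}

/* Primary key first, secondary key breaks ties (mirrors extent_snad_comp()). */
static int
demo_snad_comp(uint64_t a_sn, uintptr_t a_addr, uint64_t b_sn,
    uintptr_t b_addr) {
	int ret = demo_key_comp(a_sn, b_sn);
	if (ret != 0) {
		return ret;
	}
	return demo_key_comp((uint64_t)a_addr, (uint64_t)b_addr);
}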
10
deps/jemalloc/include/jemalloc/internal/extent_mmap.h
vendored
Normal file
10
deps/jemalloc/include/jemalloc/internal/extent_mmap.h
vendored
Normal file
@@ -0,0 +1,10 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H

extern bool opt_retain;

void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);

#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
256
deps/jemalloc/include/jemalloc/internal/extent_structs.h
vendored
Normal file
256
deps/jemalloc/include/jemalloc/internal/extent_structs.h
vendored
Normal file
@@ -0,0 +1,256 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
typedef enum {
|
||||
extent_state_active = 0,
|
||||
extent_state_dirty = 1,
|
||||
extent_state_muzzy = 2,
|
||||
extent_state_retained = 3
|
||||
} extent_state_t;
|
||||
|
||||
/* Extent (span of pages). Use accessor functions for e_* fields. */
|
||||
struct extent_s {
|
||||
/*
|
||||
* Bitfield containing several fields:
|
||||
*
|
||||
* a: arena_ind
|
||||
* b: slab
|
||||
* c: committed
|
||||
* d: dumpable
|
||||
* z: zeroed
|
||||
* t: state
|
||||
* i: szind
|
||||
* f: nfree
|
||||
* s: bin_shard
|
||||
* n: sn
|
||||
*
|
||||
* nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
|
||||
*
|
||||
* arena_ind: Arena from which this extent came, or all 1 bits if
|
||||
* unassociated.
|
||||
*
|
||||
* slab: The slab flag indicates whether the extent is used for a slab
|
||||
* of small regions. This helps differentiate small size classes,
|
||||
* and it indicates whether interior pointers can be looked up via
|
||||
* iealloc().
|
||||
*
|
||||
* committed: The committed flag indicates whether physical memory is
|
||||
* committed to the extent, whether explicitly or implicitly
|
||||
* as on a system that overcommits and satisfies physical
|
||||
* memory needs on demand via soft page faults.
|
||||
*
|
||||
* dumpable: The dumpable flag indicates whether or not we've set the
|
||||
* memory in question to be dumpable. Note that this
|
||||
* interacts somewhat subtly with user-specified extent hooks,
|
||||
* since we don't know if *they* are fiddling with
|
||||
* dumpability (in which case, we don't want to undo whatever
|
||||
* they're doing). To deal with this scenario, we:
|
||||
* - Make dumpable false only for memory allocated with the
|
||||
* default hooks.
|
||||
* - Only allow memory to go from non-dumpable to dumpable,
|
||||
* and only once.
|
||||
* - Never make the OS call to allow dumping when the
|
||||
* dumpable bit is already set.
|
||||
* These three constraints mean that we will never
|
||||
* accidentally dump user memory that the user meant to set
|
||||
* nondumpable with their extent hooks.
|
||||
*
|
||||
*
|
||||
* zeroed: The zeroed flag is used by extent recycling code to track
|
||||
* whether memory is zero-filled.
|
||||
*
|
||||
* state: The state flag is an extent_state_t.
|
||||
*
|
||||
* szind: The szind flag indicates usable size class index for
|
||||
* allocations residing in this extent, regardless of whether the
|
||||
* extent is a slab. Extent size and usable size often differ
|
||||
* even for non-slabs, either due to sz_large_pad or promotion of
|
||||
* sampled small regions.
|
||||
*
|
||||
* nfree: Number of free regions in slab.
|
||||
*
|
||||
* bin_shard: the shard of the bin from which this extent came.
|
||||
*
|
||||
* sn: Serial number (potentially non-unique).
|
||||
*
|
||||
* Serial numbers may wrap around if !opt_retain, but as long as
|
||||
* comparison functions fall back on address comparison for equal
|
||||
* serial numbers, stable (if imperfect) ordering is maintained.
|
||||
*
|
||||
* Serial numbers may not be unique even in the absence of
|
||||
* wrap-around, e.g. when splitting an extent and assigning the same
|
||||
* serial number to both resulting adjacent extents.
|
||||
*/
|
||||
uint64_t e_bits;
|
||||
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
|
||||
|
||||
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
|
||||
#define EXTENT_BITS_ARENA_SHIFT 0
|
||||
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SLAB_WIDTH 1
|
||||
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
|
||||
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_COMMITTED_WIDTH 1
|
||||
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
|
||||
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_DUMPABLE_WIDTH 1
|
||||
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
|
||||
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_ZEROED_WIDTH 1
|
||||
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
|
||||
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_STATE_WIDTH 2
|
||||
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
|
||||
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
|
||||
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
|
||||
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
|
||||
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
|
||||
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_BINSHARD_WIDTH 6
|
||||
#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
|
||||
#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_IS_HEAD_WIDTH 1
|
||||
#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
|
||||
#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
|
||||
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
|
||||
|
||||
/* Pointer to the extent that this structure is responsible for. */
|
||||
void *e_addr;
|
||||
|
||||
union {
|
||||
/*
|
||||
* Extent size and serial number associated with the extent
|
||||
* structure (different than the serial number for the extent at
|
||||
* e_addr).
|
||||
*
|
||||
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
|
||||
*/
|
||||
size_t e_size_esn;
|
||||
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
|
||||
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
|
||||
/* Base extent size, which may not be a multiple of PAGE. */
|
||||
size_t e_bsize;
|
||||
};
|
||||
|
||||
/*
|
||||
* List linkage, used by a variety of lists:
|
||||
* - bin_t's slabs_full
|
||||
* - extents_t's LRU
|
||||
* - stashed dirty extents
|
||||
* - arena's large allocations
|
||||
*/
|
||||
ql_elm(extent_t) ql_link;
|
||||
|
||||
/*
|
||||
* Linkage for per size class sn/address-ordered heaps, and
|
||||
* for extent_avail
|
||||
*/
|
||||
phn(extent_t) ph_link;
|
||||
|
||||
union {
|
||||
/* Small region slab metadata. */
|
||||
arena_slab_data_t e_slab_data;
|
||||
|
||||
/* Profiling data, used for large objects. */
|
||||
struct {
|
||||
/* Time when this was allocated. */
|
||||
nstime_t e_alloc_time;
|
||||
/* Points to a prof_tctx_t. */
|
||||
atomic_p_t e_prof_tctx;
|
||||
};
|
||||
};
|
||||
};
|
||||
typedef ql_head(extent_t) extent_list_t;
|
||||
typedef ph(extent_t) extent_tree_t;
|
||||
typedef ph(extent_t) extent_heap_t;
|
||||
|
||||
/* Quantized collection of extents, with built-in LRU queue. */
|
||||
struct extents_s {
|
||||
malloc_mutex_t mtx;
|
||||
|
||||
/*
|
||||
* Quantized per size class heaps of extents.
|
||||
*
|
||||
* Synchronization: mtx.
|
||||
*/
|
||||
extent_heap_t heaps[SC_NPSIZES + 1];
|
||||
atomic_zu_t nextents[SC_NPSIZES + 1];
|
||||
atomic_zu_t nbytes[SC_NPSIZES + 1];
|
||||
|
||||
/*
|
||||
* Bitmap for which set bits correspond to non-empty heaps.
|
||||
*
|
||||
* Synchronization: mtx.
|
||||
*/
|
||||
bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
|
||||
|
||||
/*
|
||||
* LRU of all extents in heaps.
|
||||
*
|
||||
* Synchronization: mtx.
|
||||
*/
|
||||
extent_list_t lru;
|
||||
|
||||
/*
|
||||
* Page sum for all extents in heaps.
|
||||
*
|
||||
* The synchronization here is a little tricky. Modifications to npages
|
||||
* must hold mtx, but reads need not (though, a reader who sees npages
|
||||
* without holding the mutex can't assume anything about the rest of the
|
||||
* state of the extents_t).
|
||||
*/
|
||||
atomic_zu_t npages;
|
||||
|
||||
/* All stored extents must be in the same state. */
|
||||
extent_state_t state;
|
||||
|
||||
/*
|
||||
* If true, delay coalescing until eviction; otherwise coalesce during
|
||||
* deallocation.
|
||||
*/
|
||||
bool delay_coalesce;
|
||||
};
|
||||
|
||||
/*
|
||||
* The following two structs are for experimental purposes. See
|
||||
* experimental_utilization_query_ctl and
|
||||
* experimental_utilization_batch_query_ctl in src/ctl.c.
|
||||
*/
|
||||
|
||||
struct extent_util_stats_s {
|
||||
size_t nfree;
|
||||
size_t nregs;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
struct extent_util_stats_verbose_s {
|
||||
void *slabcur_addr;
|
||||
size_t nfree;
|
||||
size_t nregs;
|
||||
size_t size;
|
||||
size_t bin_nfree;
|
||||
size_t bin_nregs;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
|
||||
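The MASK()/shift scheme above packs every flag into e_bits; each accessor extracts its field with (bits & MASK) >> SHIFT and stores it with the inverse. A minimal self-contained sketch of that pattern (hypothetical demo_* names, a 1-bit field at an arbitrary shift chosen for illustration), not the header's actual accessors:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MASK(width, shift) \
	(((((uint64_t)0x1U) << (width)) - 1) << (shift))

#define DEMO_FLAG_WIDTH	1
#define DEMO_FLAG_SHIFT	3	/* Arbitrary position, for illustration only. */
#define DEMO_FLAG_MASK	DEMO_MASK(DEMO_FLAG_WIDTH, DEMO_FLAG_SHIFT)

/* Read a packed 1-bit field, as extent_is_head_get() does with e_bits. */
static bool
demo_flag_get(uint64_t bits) {
	return (bool)((bits & DEMO_FLAG_MASK) >> DEMO_FLAG_SHIFT);
}

/* Clear the field, then OR in the new value, as extent_is_head_set() does. */
static uint64_t
demo_flag_set(uint64_t bits, bool flag) {
	return (bits & ~DEMO_FLAG_MASK) | ((uint64_t)flag << DEMO_FLAG_SHIFT);
}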
23
deps/jemalloc/include/jemalloc/internal/extent_types.h
vendored
Normal file
23
deps/jemalloc/include/jemalloc/internal/extent_types.h
vendored
Normal file
@@ -0,0 +1,23 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

typedef struct extent_util_stats_s extent_util_stats_t;
typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;

#define EXTENT_HOOKS_INITIALIZER	NULL

/*
 * When reusing (and splitting) an active extent,
 * (1U << opt_lg_extent_max_active_fit) is the max ratio between the size of
 * the active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

typedef enum {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD   /* Only relevant for Windows && opt.retain. */
} extent_head_state_t;

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
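For the max-active-fit comment above, the check amounts to comparing a candidate active extent's size against the request scaled by 1U << opt_lg_extent_max_active_fit. A hedged sketch of that comparison; the demo_* name is illustrative (the real check lives in the extent code, not this header) and overflow handling is omitted:

#include <stdbool.h>
#include <stddef.h>

/*
 * Accept an active extent only if its size does not exceed the requested size
 * by more than the configured ratio, e.g. 2^6 = 64x with
 * LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT.
 */
static bool
demo_active_fit_ok(size_t extent_size, size_t request_size,
    unsigned lg_max_active_fit) {
	return extent_size <= (request_size << lg_max_active_fit);
}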
319
deps/jemalloc/include/jemalloc/internal/hash.h
vendored
Normal file
319
deps/jemalloc/include/jemalloc/internal/hash.h
vendored
Normal file
@@ -0,0 +1,319 @@
|
||||
#ifndef JEMALLOC_INTERNAL_HASH_H
|
||||
#define JEMALLOC_INTERNAL_HASH_H
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
|
||||
* The following hash function is based on MurmurHash3, placed into the public
|
||||
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
|
||||
* details.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
/* Internal implementation. */
|
||||
static inline uint32_t
|
||||
hash_rotl_32(uint32_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (32 - r)));
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_rotl_64(uint64_t x, int8_t r) {
|
||||
return ((x << r) | (x >> (64 - r)));
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_get_block_32(const uint32_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
||||
uint32_t ret;
|
||||
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_get_block_64(const uint64_t *p, int i) {
|
||||
/* Handle unaligned read. */
|
||||
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
||||
uint64_t ret;
|
||||
|
||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return p[i];
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_fmix_32(uint32_t h) {
|
||||
h ^= h >> 16;
|
||||
h *= 0x85ebca6b;
|
||||
h ^= h >> 13;
|
||||
h *= 0xc2b2ae35;
|
||||
h ^= h >> 16;
|
||||
|
||||
return h;
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
hash_fmix_64(uint64_t k) {
|
||||
k ^= k >> 33;
|
||||
k *= KQU(0xff51afd7ed558ccd);
|
||||
k ^= k >> 33;
|
||||
k *= KQU(0xc4ceb9fe1a85ec53);
|
||||
k ^= k >> 33;
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
hash_x86_32(const void *key, int len, uint32_t seed) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 4;
|
||||
|
||||
uint32_t h1 = seed;
|
||||
|
||||
const uint32_t c1 = 0xcc9e2d51;
|
||||
const uint32_t c2 = 0x1b873593;
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
|
||||
int i;
|
||||
|
||||
for (i = -nblocks; i; i++) {
|
||||
uint32_t k1 = hash_get_block_32(blocks, i);
|
||||
|
||||
k1 *= c1;
|
||||
k1 = hash_rotl_32(k1, 15);
|
||||
k1 *= c2;
|
||||
|
||||
h1 ^= k1;
|
||||
h1 = hash_rotl_32(h1, 13);
|
||||
h1 = h1*5 + 0xe6546b64;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
|
||||
|
||||
uint32_t k1 = 0;
|
||||
|
||||
switch (len & 3) {
|
||||
case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
|
||||
case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
|
||||
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
|
||||
k1 *= c2; h1 ^= k1;
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len;
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
|
||||
return h1;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hash_x86_128(const void *key, const int len, uint32_t seed,
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t * data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint32_t h1 = seed;
|
||||
uint32_t h2 = seed;
|
||||
uint32_t h3 = seed;
|
||||
uint32_t h4 = seed;
|
||||
|
||||
const uint32_t c1 = 0x239b961b;
|
||||
const uint32_t c2 = 0xab0e9789;
|
||||
const uint32_t c3 = 0x38b34ae5;
|
||||
const uint32_t c4 = 0xa1e38b93;
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
|
||||
int i;
|
||||
|
||||
for (i = -nblocks; i; i++) {
|
||||
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
|
||||
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
|
||||
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
|
||||
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
|
||||
|
||||
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
|
||||
|
||||
h1 = hash_rotl_32(h1, 19); h1 += h2;
|
||||
h1 = h1*5 + 0x561ccd1b;
|
||||
|
||||
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
|
||||
|
||||
h2 = hash_rotl_32(h2, 17); h2 += h3;
|
||||
h2 = h2*5 + 0x0bcaa747;
|
||||
|
||||
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
|
||||
|
||||
h3 = hash_rotl_32(h3, 15); h3 += h4;
|
||||
h3 = h3*5 + 0x96cd1c35;
|
||||
|
||||
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
|
||||
|
||||
h4 = hash_rotl_32(h4, 13); h4 += h1;
|
||||
h4 = h4*5 + 0x32ac3b17;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
|
||||
uint32_t k1 = 0;
|
||||
uint32_t k2 = 0;
|
||||
uint32_t k3 = 0;
|
||||
uint32_t k4 = 0;
|
||||
|
||||
switch (len & 15) {
|
||||
case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
|
||||
case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
|
||||
case 13: k4 ^= tail[12] << 0;
|
||||
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
|
||||
JEMALLOC_FALLTHROUGH
|
||||
case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
|
||||
case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
|
||||
case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
|
||||
case 9: k3 ^= tail[ 8] << 0;
|
||||
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
|
||||
JEMALLOC_FALLTHROUGH
|
||||
case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
|
||||
case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
|
||||
case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
|
||||
case 5: k2 ^= tail[ 4] << 0;
|
||||
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
|
||||
JEMALLOC_FALLTHROUGH
|
||||
case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
|
||||
case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
|
||||
case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
|
||||
case 1: k1 ^= tail[ 0] << 0;
|
||||
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
|
||||
JEMALLOC_FALLTHROUGH
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
|
||||
|
||||
h1 += h2; h1 += h3; h1 += h4;
|
||||
h2 += h1; h3 += h1; h4 += h1;
|
||||
|
||||
h1 = hash_fmix_32(h1);
|
||||
h2 = hash_fmix_32(h2);
|
||||
h3 = hash_fmix_32(h3);
|
||||
h4 = hash_fmix_32(h4);
|
||||
|
||||
h1 += h2; h1 += h3; h1 += h4;
|
||||
h2 += h1; h3 += h1; h4 += h1;
|
||||
|
||||
r_out[0] = (((uint64_t) h2) << 32) | h1;
|
||||
r_out[1] = (((uint64_t) h4) << 32) | h3;
|
||||
}
|
||||
|
||||
static inline void
|
||||
hash_x64_128(const void *key, const int len, const uint32_t seed,
|
||||
uint64_t r_out[2]) {
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 16;
|
||||
|
||||
uint64_t h1 = seed;
|
||||
uint64_t h2 = seed;
|
||||
|
||||
const uint64_t c1 = KQU(0x87c37b91114253d5);
|
||||
const uint64_t c2 = KQU(0x4cf5ad432745937f);
|
||||
|
||||
/* body */
|
||||
{
|
||||
const uint64_t *blocks = (const uint64_t *) (data);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nblocks; i++) {
|
||||
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
|
||||
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
|
||||
|
||||
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
|
||||
|
||||
h1 = hash_rotl_64(h1, 27); h1 += h2;
|
||||
h1 = h1*5 + 0x52dce729;
|
||||
|
||||
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
|
||||
|
||||
h2 = hash_rotl_64(h2, 31); h2 += h1;
|
||||
h2 = h2*5 + 0x38495ab5;
|
||||
}
|
||||
}
|
||||
|
||||
/* tail */
|
||||
{
|
||||
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
|
||||
uint64_t k1 = 0;
|
||||
uint64_t k2 = 0;
|
||||
|
||||
switch (len & 15) {
|
||||
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
|
||||
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
|
||||
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
|
||||
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
|
||||
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
|
||||
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
|
||||
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
|
||||
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
|
||||
JEMALLOC_FALLTHROUGH
|
||||
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
|
||||
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
|
||||
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
|
||||
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
|
||||
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
|
||||
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
|
||||
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
|
||||
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
|
||||
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
|
||||
}
|
||||
}
|
||||
|
||||
/* finalization */
|
||||
h1 ^= len; h2 ^= len;
|
||||
|
||||
h1 += h2;
|
||||
h2 += h1;
|
||||
|
||||
h1 = hash_fmix_64(h1);
|
||||
h2 = hash_fmix_64(h2);
|
||||
|
||||
h1 += h2;
|
||||
h2 += h1;
|
||||
|
||||
r_out[0] = h1;
|
||||
r_out[1] = h2;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
/* API. */
|
||||
static inline void
|
||||
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
|
||||
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
|
||||
|
||||
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
|
||||
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
|
||||
#else
|
||||
{
|
||||
uint64_t hashes[2];
|
||||
hash_x86_128(key, (int)len, seed, hashes);
|
||||
r_hash[0] = (size_t)hashes[0];
|
||||
r_hash[1] = (size_t)hashes[1];
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HASH_H */
|
||||
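A minimal usage sketch of the hash() entry point declared above. It assumes the jemalloc internal headers are already set up for the translation unit (as jemalloc_preamble.h normally guarantees); the demo_* name and the seed constant are illustrative, not jemalloc conventions:

/* Illustrative only: hash a small key into two size_t words. */
static void
demo_hash_key(const char *key, size_t len, size_t out[2]) {
	/* 0x9e3779b9 is an arbitrary demo seed, not a jemalloc constant. */
	hash(key, len, 0x9e3779b9, out);
}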
163
deps/jemalloc/include/jemalloc/internal/hook.h
vendored
Normal file
163
deps/jemalloc/include/jemalloc/internal/hook.h
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
#ifndef JEMALLOC_INTERNAL_HOOK_H
|
||||
#define JEMALLOC_INTERNAL_HOOK_H
|
||||
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/*
|
||||
* This API is *extremely* experimental, and may get ripped out, changed in API-
|
||||
* and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
|
||||
*
|
||||
* It allows hooking the stateful parts of the API to see changes as they
|
||||
* happen.
|
||||
*
|
||||
* Allocation hooks are called after the allocation is done, free hooks are
|
||||
* called before the free is done, and expand hooks are called after the
|
||||
* allocation is expanded.
|
||||
*
|
||||
* For realloc and rallocx, if the expansion happens in place, the expansion
|
||||
* hook is called. If it is moved, then the alloc hook is called on the new
|
||||
* location, and then the free hook is called on the old location (i.e. both
|
||||
* hooks are invoked in between the alloc and the dalloc).
|
||||
*
|
||||
* If we return NULL from OOM, then usize might not be trustworthy. Calling
|
||||
* realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
|
||||
* only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
|
||||
* and only calls the alloc hook).
|
||||
*
|
||||
* Reentrancy:
|
||||
* Reentrancy is guarded against from within the hook implementation. If you
|
||||
* call allocator functions from within a hook, the hooks will not be invoked
|
||||
* again.
|
||||
* Threading:
|
||||
* The installation of a hook synchronizes with all its uses. If you can
|
||||
* prove the installation of a hook happens-before a jemalloc entry point,
|
||||
* then the hook will get invoked (unless there's a racing removal).
|
||||
*
|
||||
* Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
|
||||
* allocates and has the alloc hook invoked, then a subsequent free on the
|
||||
* same thread will also have the free hook invoked).
|
||||
*
|
||||
* The *removal* of a hook does *not* block until all threads are done with
|
||||
* the hook. Hook authors have to be resilient to this, and need some
|
||||
* out-of-band mechanism for cleaning up any dynamically allocated memory
|
||||
* associated with their hook.
|
||||
* Ordering:
|
||||
* Order of hook execution is unspecified, and may be different than insertion
|
||||
* order.
|
||||
*/
|
||||
|
||||
#define HOOK_MAX 4
|
||||
|
||||
enum hook_alloc_e {
|
||||
hook_alloc_malloc,
|
||||
hook_alloc_posix_memalign,
|
||||
hook_alloc_aligned_alloc,
|
||||
hook_alloc_calloc,
|
||||
hook_alloc_memalign,
|
||||
hook_alloc_valloc,
|
||||
hook_alloc_mallocx,
|
||||
|
||||
/* The reallocating functions have both alloc and dalloc variants */
|
||||
hook_alloc_realloc,
|
||||
hook_alloc_rallocx,
|
||||
};
|
||||
/*
|
||||
* We put the enum typedef after the enum, since this file may get included by
|
||||
* jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
|
||||
*/
|
||||
typedef enum hook_alloc_e hook_alloc_t;
|
||||
|
||||
enum hook_dalloc_e {
|
||||
hook_dalloc_free,
|
||||
hook_dalloc_dallocx,
|
||||
hook_dalloc_sdallocx,
|
||||
|
||||
/*
|
||||
* The dalloc halves of reallocation (not called if in-place expansion
|
||||
* happens).
|
||||
*/
|
||||
hook_dalloc_realloc,
|
||||
hook_dalloc_rallocx,
|
||||
};
|
||||
typedef enum hook_dalloc_e hook_dalloc_t;
|
||||
|
||||
|
||||
enum hook_expand_e {
|
||||
hook_expand_realloc,
|
||||
hook_expand_rallocx,
|
||||
hook_expand_xallocx,
|
||||
};
|
||||
typedef enum hook_expand_e hook_expand_t;
|
||||
|
||||
typedef void (*hook_alloc)(
|
||||
void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
typedef void (*hook_dalloc)(
|
||||
void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
|
||||
|
||||
typedef void (*hook_expand)(
|
||||
void *extra, hook_expand_t type, void *address, size_t old_usize,
|
||||
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
|
||||
|
||||
typedef struct hooks_s hooks_t;
|
||||
struct hooks_s {
|
||||
hook_alloc alloc_hook;
|
||||
hook_dalloc dalloc_hook;
|
||||
hook_expand expand_hook;
|
||||
void *extra;
|
||||
};
|
||||
|
||||
/*
|
||||
* Begin implementation details; everything above this point might one day live
|
||||
* in a public API. Everything below this point never will.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The realloc pathways haven't gotten any refactoring love in a while, and it's
|
||||
* fairly difficult to pass information from the entry point to the hooks. We
|
||||
 * put the information the hooks will need into a struct to encapsulate
|
||||
* everything.
|
||||
*
|
||||
* Much of these pathways are force-inlined, so that the compiler can avoid
|
||||
* materializing this struct until we hit an extern arena function. For fairly
|
||||
* goofy reasons, *many* of the realloc paths hit an extern arena function.
|
||||
* These paths are cold enough that it doesn't matter; eventually, we should
|
||||
* rewrite the realloc code to make the expand-in-place and the
|
||||
* free-then-realloc paths more orthogonal, at which point we don't need to
|
||||
* spread the hook logic all over the place.
|
||||
*/
|
||||
typedef struct hook_ralloc_args_s hook_ralloc_args_t;
|
||||
struct hook_ralloc_args_s {
|
||||
/* I.e. as opposed to rallocx. */
|
||||
bool is_realloc;
|
||||
/*
|
||||
* The expand hook takes 4 arguments, even if only 3 are actually used;
|
||||
* we add an extra one in case the user decides to memcpy without
|
||||
* looking too closely at the hooked function.
|
||||
*/
|
||||
uintptr_t args[4];
|
||||
};
|
||||
|
||||
bool hook_boot();

/*
 * Returns an opaque handle to be used when removing the hook. NULL means that
 * we couldn't install the hook.
 */
void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
|
||||
/* Uninstalls the hook with the handle previously returned from hook_install. */
|
||||
void hook_remove(tsdn_t *tsdn, void *opaque);
|
||||
|
||||
/* Hooks */
|
||||
|
||||
void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
void hook_invoke_dalloc(hook_dalloc_t type, void *address,
|
||||
uintptr_t args_raw[3]);
|
||||
|
||||
void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
|
||||
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_HOOK_H */
|
||||
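A hedged usage sketch of the hook API above: fill in a hooks_t, install it, and later pass the returned handle to hook_remove(). The demo_* names are placeholders, a real caller obtains its tsdn_t * from the calling thread's TSD, and it is assumed here (not stated in the header) that unused hook slots may be left NULL:

/* Illustrative alloc hook: count allocations observed (demo only, unsynchronized). */
static void
demo_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	(void)type; (void)result; (void)result_raw; (void)args_raw;
	size_t *counter = (size_t *)extra;
	(*counter)++;
}

static size_t demo_alloc_count;

static void *
demo_install_hooks(tsdn_t *tsdn) {
	static hooks_t demo_hooks;
	demo_hooks.alloc_hook = demo_alloc_hook;
	demo_hooks.dalloc_hook = NULL;	/* Assumption: unused hooks are skipped. */
	demo_hooks.expand_hook = NULL;
	demo_hooks.extra = &demo_alloc_count;
	/* NULL means the hook could not be installed. */
	return hook_install(tsdn, &demo_hooks);
}

Removal later is simply hook_remove(tsdn, handle); per the comment above, removal does not wait for in-flight hook invocations to finish.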
19
deps/jemalloc/include/jemalloc/internal/hooks.h
vendored
Normal file
19
deps/jemalloc/include/jemalloc/internal/hooks.h
vendored
Normal file
@@ -0,0 +1,19 @@
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H

extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();

#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

#define open JEMALLOC_HOOK(open, hooks_libc_hook)
#define read JEMALLOC_HOOK(read, hooks_libc_hook)
#define write JEMALLOC_HOOK(write, hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
#define close JEMALLOC_HOOK(close, hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)

#endif /* JEMALLOC_INTERNAL_HOOKS_H */
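To make the JEMALLOC_HOOK macro above concrete: with the `open` define in place, a call site expands so that hooks_libc_hook (if non-NULL) runs first, and the comma operator then yields the real function, which is called with the original arguments. A hedged illustration (the path is a placeholder, not an actual jemalloc call site):

/*
 * Source in jemalloc:   fd = open("/some/path", O_RDONLY);
 * After preprocessing:  fd = ((void)(hooks_libc_hook != NULL &&
 *                           (hooks_libc_hook(), 0)), open)("/some/path",
 *                           O_RDONLY);
 * The inner `open` is not re-expanded (self-referential macro rule), so it
 * names the real libc function; the hook fires purely as a side effect.
 */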
46
deps/jemalloc/include/jemalloc/internal/huge.h
vendored
Normal file
46
deps/jemalloc/include/jemalloc/internal/huge.h
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
/* Huge allocation statistics. */
|
||||
extern uint64_t huge_nmalloc;
|
||||
extern uint64_t huge_ndalloc;
|
||||
extern size_t huge_allocated;
|
||||
|
||||
/* Protects chunk-related data structures. */
|
||||
extern malloc_mutex_t huge_mtx;
|
||||
|
||||
void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
|
||||
void *huge_palloc(size_t size, size_t alignment, bool zero,
|
||||
dss_prec_t dss_prec);
|
||||
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra);
|
||||
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
|
||||
#ifdef JEMALLOC_JET
|
||||
typedef void (huge_dalloc_junk_t)(void *, size_t);
|
||||
extern huge_dalloc_junk_t *huge_dalloc_junk;
|
||||
#endif
|
||||
void huge_dalloc(void *ptr, bool unmap);
|
||||
size_t huge_salloc(const void *ptr);
|
||||
dss_prec_t huge_dss_prec_get(arena_t *arena);
|
||||
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
|
||||
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
|
||||
bool huge_boot(void);
|
||||
void huge_prefork(void);
|
||||
void huge_postfork_parent(void);
|
||||
void huge_postfork_child(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
1059
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h
vendored
Normal file
1059
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
94
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
vendored
Normal file
94
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
#ifndef JEMALLOC_INTERNAL_DECLS_H
|
||||
#define JEMALLOC_INTERNAL_DECLS_H
|
||||
|
||||
#include <math.h>
|
||||
#ifdef _WIN32
|
||||
# include <windows.h>
|
||||
# include "msvc_compat/windows_extra.h"
|
||||
# ifdef _WIN64
|
||||
# if LG_VADDR <= 32
|
||||
# error Generate the headers using x64 vcargs
|
||||
# endif
|
||||
# else
|
||||
# if LG_VADDR > 32
|
||||
# undef LG_VADDR
|
||||
# define LG_VADDR 32
|
||||
# endif
|
||||
# endif
|
||||
#else
|
||||
# include <sys/param.h>
|
||||
# include <sys/mman.h>
|
||||
# if !defined(__pnacl__) && !defined(__native_client__)
|
||||
# include <sys/syscall.h>
|
||||
# if !defined(SYS_write) && defined(__NR_write)
|
||||
# define SYS_write __NR_write
|
||||
# endif
|
||||
# if defined(SYS_open) && defined(__aarch64__)
|
||||
/* Android headers may define SYS_open to __NR_open even though
|
||||
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
|
||||
# undef SYS_open
|
||||
# endif
|
||||
# include <sys/uio.h>
|
||||
# endif
|
||||
# include <pthread.h>
|
||||
# ifdef __FreeBSD__
|
||||
# include <pthread_np.h>
|
||||
# endif
|
||||
# include <signal.h>
|
||||
# ifdef JEMALLOC_OS_UNFAIR_LOCK
|
||||
# include <os/lock.h>
|
||||
# endif
|
||||
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
|
||||
# include <sched.h>
|
||||
# endif
|
||||
# include <errno.h>
|
||||
# include <sys/time.h>
|
||||
# include <time.h>
|
||||
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
|
||||
# include <mach/mach_time.h>
|
||||
# endif
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <limits.h>
|
||||
#ifndef SIZE_T_MAX
|
||||
# define SIZE_T_MAX SIZE_MAX
|
||||
#endif
|
||||
#ifndef SSIZE_MAX
|
||||
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
|
||||
#endif
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#ifndef offsetof
|
||||
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
|
||||
#endif
|
||||
#include <string.h>
|
||||
#include <strings.h>
|
||||
#include <ctype.h>
|
||||
#ifdef _MSC_VER
|
||||
# include <io.h>
|
||||
typedef intptr_t ssize_t;
|
||||
# define PATH_MAX 1024
|
||||
# define STDERR_FILENO 2
|
||||
# define __func__ __FUNCTION__
|
||||
# ifdef JEMALLOC_HAS_RESTRICT
|
||||
# define restrict __restrict
|
||||
# endif
|
||||
/* Disable warnings about deprecated system functions. */
|
||||
# pragma warning(disable: 4996)
|
||||
#if _MSC_VER < 1800
|
||||
static int
|
||||
isblank(int c) {
|
||||
return (c == '\t' || c == ' ');
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
# include <unistd.h>
|
||||
#endif
|
||||
#include <fcntl.h>
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_DECLS_H */
|
||||
57
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
vendored
Normal file
57
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/tsd_types.h"
|
||||
|
||||
/* TSD checks this to set thread local slow state accordingly. */
|
||||
extern bool malloc_slow;
|
||||
|
||||
/* Run-time options. */
|
||||
extern bool opt_abort;
|
||||
extern bool opt_abort_conf;
|
||||
extern bool opt_confirm_conf;
|
||||
extern const char *opt_junk;
|
||||
extern bool opt_junk_alloc;
|
||||
extern bool opt_junk_free;
|
||||
extern bool opt_utrace;
|
||||
extern bool opt_xmalloc;
|
||||
extern bool opt_zero;
|
||||
extern unsigned opt_narenas;
|
||||
|
||||
/* Number of CPUs. */
|
||||
extern unsigned ncpus;
|
||||
|
||||
/* Number of arenas used for automatic multiplexing of threads and arenas. */
|
||||
extern unsigned narenas_auto;
|
||||
|
||||
/* Base index for manual arenas. */
|
||||
extern unsigned manual_arena_base;
|
||||
|
||||
/*
|
||||
* Arenas that are used to service external requests. Not all elements of the
|
||||
* arenas array are necessarily used; arenas are created lazily as needed.
|
||||
*/
|
||||
extern atomic_p_t arenas[];
|
||||
|
||||
void *a0malloc(size_t size);
|
||||
void a0dalloc(void *ptr);
|
||||
void *bootstrap_malloc(size_t size);
|
||||
void *bootstrap_calloc(size_t num, size_t size);
|
||||
void bootstrap_free(void *ptr);
|
||||
void arena_set(unsigned ind, arena_t *arena);
|
||||
unsigned narenas_total_get(void);
|
||||
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
|
||||
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
|
||||
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
|
||||
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
|
||||
void iarena_cleanup(tsd_t *tsd);
|
||||
void arena_cleanup(tsd_t *tsd);
|
||||
void arenas_tdata_cleanup(tsd_t *tsd);
|
||||
void jemalloc_prefork(void);
|
||||
void jemalloc_postfork_parent(void);
|
||||
void jemalloc_postfork_child(void);
|
||||
bool malloc_initialized(void);
|
||||
void je_sdallocx_noflags(void *ptr, size_t size);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
|
||||
94
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
vendored
Normal file
94
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
|
||||
#define JEMALLOC_INTERNAL_INCLUDES_H
|
||||
|
||||
/*
|
||||
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
|
||||
* but there are circular dependencies that cannot be broken without
|
||||
* substantial performance degradation.
|
||||
*
|
||||
 * Historically, we dealt with this by splitting each header into four sections (types,
|
||||
* structs, externs, and inlines), and included each header file multiple times
|
||||
* in this file, picking out the portion we want on each pass using the
|
||||
* following #defines:
|
||||
 * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
|
||||
* types.
|
||||
* JEMALLOC_H_STRUCTS : Data structures.
|
||||
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
|
||||
* JEMALLOC_H_INLINES : Inline functions.
|
||||
*
|
||||
* We're moving toward a world in which the dependencies are explicit; each file
|
||||
* will #include the headers it depends on (rather than relying on them being
|
||||
* implicitly available via this file including every header file in the
|
||||
* project).
|
||||
*
|
||||
* We're now in an intermediate state: we've broken up the header files to avoid
|
||||
* having to include each one multiple times, but have not yet moved the
|
||||
* dependency information into the header files (i.e. we still rely on the
|
||||
* ordering in this file to ensure all a header's dependencies are available in
|
||||
* its translation unit). Each component is now broken up into multiple header
|
||||
* files, corresponding to the sections above (e.g. instead of "foo.h", we now
|
||||
* have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
|
||||
*
|
||||
* Those files which have been converted to explicitly include their
|
||||
* inter-component dependencies are now in the initial HERMETIC HEADERS
|
||||
* section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
|
||||
* must be included first in every translation unit) for system headers and
|
||||
* global jemalloc definitions, however.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
/* TYPES */
|
||||
/******************************************************************************/
|
||||
|
||||
#include "jemalloc/internal/extent_types.h"
|
||||
#include "jemalloc/internal/base_types.h"
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/tcache_types.h"
|
||||
#include "jemalloc/internal/prof_types.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* STRUCTS */
|
||||
/******************************************************************************/
|
||||
|
||||
#include "jemalloc/internal/arena_structs_a.h"
|
||||
#include "jemalloc/internal/extent_structs.h"
|
||||
#include "jemalloc/internal/base_structs.h"
|
||||
#include "jemalloc/internal/prof_structs.h"
|
||||
#include "jemalloc/internal/arena_structs_b.h"
|
||||
#include "jemalloc/internal/tcache_structs.h"
|
||||
#include "jemalloc/internal/background_thread_structs.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* EXTERNS */
|
||||
/******************************************************************************/
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_externs.h"
|
||||
#include "jemalloc/internal/extent_externs.h"
|
||||
#include "jemalloc/internal/base_externs.h"
|
||||
#include "jemalloc/internal/arena_externs.h"
|
||||
#include "jemalloc/internal/large_externs.h"
|
||||
#include "jemalloc/internal/tcache_externs.h"
|
||||
#include "jemalloc/internal/prof_externs.h"
|
||||
#include "jemalloc/internal/background_thread_externs.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
|
||||
#include "jemalloc/internal/base_inlines.h"
|
||||
/*
|
||||
* Include portions of arena code interleaved with tcache code in order to
|
||||
* resolve circular dependencies.
|
||||
*/
|
||||
#include "jemalloc/internal/prof_inlines_a.h"
|
||||
#include "jemalloc/internal/arena_inlines_a.h"
|
||||
#include "jemalloc/internal/extent_inlines.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
|
||||
#include "jemalloc/internal/tcache_inlines.h"
|
||||
#include "jemalloc/internal/arena_inlines_b.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
|
||||
#include "jemalloc/internal/prof_inlines_b.h"
|
||||
#include "jemalloc/internal/background_thread_inlines.h"
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
|
||||
174
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
vendored
Normal file
174
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_A_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
|
||||
malloc_getcpu(void) {
|
||||
assert(have_percpu_arena);
|
||||
#if defined(_WIN32)
|
||||
return GetCurrentProcessorNumber();
|
||||
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
||||
return (malloc_cpuid_t)sched_getcpu();
|
||||
#else
|
||||
not_reached();
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Return the chosen arena index based on current cpu. */
|
||||
JEMALLOC_ALWAYS_INLINE unsigned
|
||||
percpu_arena_choose(void) {
|
||||
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
|
||||
|
||||
malloc_cpuid_t cpuid = malloc_getcpu();
|
||||
assert(cpuid >= 0);
|
||||
|
||||
unsigned arena_ind;
|
||||
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
|
||||
2)) {
|
||||
arena_ind = cpuid;
|
||||
} else {
|
||||
assert(opt_percpu_arena == per_phycpu_arena);
|
||||
/* Hyper threads on the same physical CPU share arena. */
|
||||
arena_ind = cpuid - ncpus / 2;
|
||||
}
|
||||
|
||||
return arena_ind;
|
||||
}
|
||||
|
||||
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
|
||||
JEMALLOC_ALWAYS_INLINE unsigned
|
||||
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
|
||||
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
|
||||
if (mode == per_phycpu_arena && ncpus > 1) {
|
||||
if (ncpus % 2) {
|
||||
/* This likely means a misconfig. */
|
||||
return ncpus / 2 + 1;
|
||||
}
|
||||
return ncpus / 2;
|
||||
} else {
|
||||
return ncpus;
|
||||
}
|
||||
}
|
||||
|
||||
static inline arena_tdata_t *
|
||||
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
|
||||
arena_tdata_t *tdata;
|
||||
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
|
||||
|
||||
if (unlikely(arenas_tdata == NULL)) {
|
||||
/* arenas_tdata hasn't been initialized yet. */
|
||||
return arena_tdata_get_hard(tsd, ind);
|
||||
}
|
||||
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
|
||||
/*
|
||||
		 * ind is invalid, cache is old (too small), or tdata needs to
		 * be initialized.
|
||||
*/
|
||||
return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
|
||||
NULL);
|
||||
}
|
||||
|
||||
tdata = &arenas_tdata[ind];
|
||||
if (likely(tdata != NULL) || !refresh_if_missing) {
|
||||
return tdata;
|
||||
}
|
||||
return arena_tdata_get_hard(tsd, ind);
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
|
||||
arena_t *ret;
|
||||
|
||||
assert(ind < MALLOCX_ARENA_LIMIT);
|
||||
|
||||
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
|
||||
if (unlikely(ret == NULL)) {
|
||||
if (init_if_missing) {
|
||||
ret = arena_init(tsdn, ind,
|
||||
(extent_hooks_t *)&extent_hooks_default);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline ticker_t *
|
||||
decay_ticker_get(tsd_t *tsd, unsigned ind) {
|
||||
arena_tdata_t *tdata;
|
||||
|
||||
tdata = arena_tdata_get(tsd, ind, true);
|
||||
if (unlikely(tdata == NULL)) {
|
||||
return NULL;
|
||||
}
|
||||
return &tdata->decay_ticker;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_t *
|
||||
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
|
||||
assert(binind < SC_NBINS);
|
||||
return &tcache->bins_small[binind];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_t *
|
||||
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
|
||||
	assert(binind >= SC_NBINS && binind < nhbins);
|
||||
return &tcache->bins_large[binind - SC_NBINS];
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tcache_available(tsd_t *tsd) {
|
||||
/*
|
||||
* Thread specific auto tcache might be unavailable if: 1) during tcache
|
||||
* initialization, or 2) disabled through thread.tcache.enabled mallctl
|
||||
* or config options. This check covers all cases.
|
||||
*/
|
||||
if (likely(tsd_tcache_enabled_get(tsd))) {
|
||||
/* Associated arena == NULL implies tcache init in progress. */
|
||||
assert(tsd_tcachep_get(tsd)->arena == NULL ||
|
||||
tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
|
||||
NULL);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tcache_t *
|
||||
tcache_get(tsd_t *tsd) {
|
||||
if (!tcache_available(tsd)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return tsd_tcachep_get(tsd);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
|
||||
/* arena is the current context. Reentry from a0 is not allowed. */
|
||||
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
|
||||
|
||||
bool fast = tsd_fast(tsd);
|
||||
assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
|
||||
++*tsd_reentrancy_levelp_get(tsd);
|
||||
if (fast) {
|
||||
/* Prepare slow path for reentrancy. */
|
||||
tsd_slow_update(tsd);
|
||||
assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
post_reentrancy(tsd_t *tsd) {
|
||||
int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
|
||||
assert(*reentrancy_level > 0);
|
||||
if (--*reentrancy_level == 0) {
|
||||
tsd_slow_update(tsd);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
|
||||
87
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
vendored
Normal file
87
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_B_H
|
||||
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
/* Choose an arena based on a per-thread value. */
|
||||
static inline arena_t *
|
||||
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
|
||||
arena_t *ret;
|
||||
|
||||
if (arena != NULL) {
|
||||
return arena;
|
||||
}
|
||||
|
||||
/* During reentrancy, arena 0 is the safest bet. */
|
||||
if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
|
||||
return arena_get(tsd_tsdn(tsd), 0, true);
|
||||
}
|
||||
|
||||
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
|
||||
if (unlikely(ret == NULL)) {
|
||||
ret = arena_choose_hard(tsd, internal);
|
||||
assert(ret);
|
||||
if (tcache_available(tsd)) {
|
||||
tcache_t *tcache = tcache_get(tsd);
|
||||
if (tcache->arena != NULL) {
|
||||
/* See comments in tcache_data_init().*/
|
||||
assert(tcache->arena ==
|
||||
arena_get(tsd_tsdn(tsd), 0, false));
|
||||
if (tcache->arena != ret) {
|
||||
tcache_arena_reassociate(tsd_tsdn(tsd),
|
||||
tcache, ret);
|
||||
}
|
||||
} else {
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache,
|
||||
ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that for percpu arena, if the current arena is outside of the
|
||||
* auto percpu arena range, (i.e. thread is assigned to a manually
|
||||
* managed arena), then percpu arena is skipped.
|
||||
*/
|
||||
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
|
||||
!internal && (arena_ind_get(ret) <
|
||||
percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
|
||||
tsd_tsdn(tsd))) {
|
||||
unsigned ind = percpu_arena_choose();
|
||||
if (arena_ind_get(ret) != ind) {
|
||||
percpu_arena_update(tsd, ind);
|
||||
ret = tsd_arena_get(tsd);
|
||||
}
|
||||
ret->last_thd = tsd_tsdn(tsd);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
arena_choose(tsd_t *tsd, arena_t *arena) {
|
||||
return arena_choose_impl(tsd, arena, false);
|
||||
}
|
||||
|
||||
static inline arena_t *
|
||||
arena_ichoose(tsd_t *tsd, arena_t *arena) {
|
||||
return arena_choose_impl(tsd, arena, true);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
arena_is_auto(arena_t *arena) {
|
||||
assert(narenas_auto > 0);
|
||||
|
||||
return (arena_ind_get(arena) < manual_arena_base);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
iealloc(tsdn_t *tsdn, const void *ptr) {
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)ptr, true);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
|
||||
222
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
vendored
Normal file
222
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
vendored
Normal file
@@ -0,0 +1,222 @@
|
||||
#ifndef JEMALLOC_INTERNAL_INLINES_C_H
|
||||
#define JEMALLOC_INTERNAL_INLINES_C_H
|
||||
|
||||
#include "jemalloc/internal/hook.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
/*
|
||||
* Translating the names of the 'i' functions:
|
||||
* Abbreviations used in the first part of the function name (before
|
||||
* alloc/dalloc) describe what that function accomplishes:
|
||||
* a: arena (query)
|
||||
* s: size (query, or sized deallocation)
|
||||
* e: extent (query)
|
||||
* p: aligned (allocates)
|
||||
* vs: size (query, without knowing that the pointer is into the heap)
|
||||
* r: rallocx implementation
|
||||
* x: xallocx implementation
|
||||
* Abbreviations used in the second part of the function name (after
|
||||
* alloc/dalloc) describe the arguments it takes
|
||||
* z: whether to return zeroed memory
|
||||
* t: accepts a tcache_t * parameter
|
||||
* m: accepts an arena_t * parameter
|
||||
*/
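/*
 * Worked example of the naming scheme above (added for clarity): iallocztm()
 * is the internal alloc that also takes a zero flag (z), a tcache_t * (t),
 * and an arena_t * (m); ipallocztm() is its aligned (p) counterpart; and
 * isdalloct() is a sized (s) deallocation that accepts a tcache_t * (t).
 */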
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE arena_t *
|
||||
iaalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_aalloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
isalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_salloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
|
||||
bool is_internal, arena_t *arena, bool slow_path) {
|
||||
void *ret;
|
||||
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena == NULL || arena_is_auto(arena));
|
||||
if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
}
|
||||
|
||||
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
|
||||
if (config_stats && is_internal && likely(ret != NULL)) {
|
||||
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
|
||||
return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
|
||||
NULL, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
|
||||
tcache_t *tcache, bool is_internal, arena_t *arena) {
|
||||
void *ret;
|
||||
|
||||
assert(usize != 0);
|
||||
assert(usize == sz_sa2u(usize, alignment));
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena == NULL || arena_is_auto(arena));
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
|
||||
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
|
||||
if (config_stats && is_internal && likely(ret != NULL)) {
|
||||
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
|
||||
tcache_t *tcache, arena_t *arena) {
|
||||
return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
|
||||
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
|
||||
tcache_get(tsd), false, NULL);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
ivsalloc(tsdn_t *tsdn, const void *ptr) {
|
||||
return arena_vsalloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
|
||||
bool is_internal, bool slow_path) {
|
||||
assert(ptr != NULL);
|
||||
assert(!is_internal || tcache == NULL);
|
||||
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
if (config_stats && is_internal) {
|
||||
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
|
||||
}
|
||||
if (!is_internal && !tsdn_null(tsdn) &&
|
||||
tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
|
||||
assert(tcache == NULL);
|
||||
}
|
||||
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
idalloc(tsd_t *tsd, void *ptr) {
|
||||
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
|
||||
alloc_ctx_t *alloc_ctx, bool slow_path) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
|
||||
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
|
||||
hook_ralloc_args_t *hook_args) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
void *p;
|
||||
size_t usize, copysize;
|
||||
|
||||
usize = sz_sa2u(size, alignment);
|
||||
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
|
||||
return NULL;
|
||||
}
|
||||
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
|
||||
if (p == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Copy at most size bytes (not size+extra), since the caller has no
|
||||
* expectation that the extra bytes will be reliably preserved.
|
||||
*/
|
||||
copysize = (size < oldsize) ? size : oldsize;
|
||||
memcpy(p, ptr, copysize);
|
||||
hook_invoke_alloc(hook_args->is_realloc
|
||||
? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
|
||||
hook_args->args);
|
||||
hook_invoke_dalloc(hook_args->is_realloc
|
||||
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
|
||||
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
 * is_realloc threads through the knowledge of whether or not this call comes
 * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
 * correct entry point into any hooks.
 * Note that these functions are all force-inlined, so no actual bool gets
 * passed around anywhere.
 */
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
|
||||
bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
|
||||
{
|
||||
assert(ptr != NULL);
|
||||
assert(size != 0);
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||
!= 0) {
|
||||
/*
|
||||
* Existing object alignment is inadequate; allocate new space
|
||||
* and copy.
|
||||
*/
|
||||
return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
|
||||
zero, tcache, arena, hook_args);
|
||||
}
|
||||
|
||||
return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
|
||||
tcache, hook_args);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
|
||||
bool zero, hook_ralloc_args_t *hook_args) {
|
||||
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
|
||||
tcache_get(tsd), NULL, hook_args);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, size_t *newsize) {
|
||||
assert(ptr != NULL);
|
||||
assert(size != 0);
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||
!= 0) {
|
||||
/* Existing object alignment is inadequate. */
|
||||
*newsize = oldsize;
|
||||
return true;
|
||||
}
|
||||
|
||||
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
|
||||
newsize);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
|
||||
114
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
vendored
Normal file
114
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
#ifndef JEMALLOC_INTERNAL_MACROS_H
|
||||
#define JEMALLOC_INTERNAL_MACROS_H
|
||||
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
# define JEMALLOC_ALWAYS_INLINE static inline
|
||||
#else
|
||||
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
|
||||
#endif
|
||||
#ifdef _MSC_VER
|
||||
# define inline _inline
|
||||
#endif
|
||||
|
||||
#define UNUSED JEMALLOC_ATTR(unused)
|
||||
|
||||
#define ZU(z) ((size_t)z)
|
||||
#define ZD(z) ((ssize_t)z)
|
||||
#define QU(q) ((uint64_t)q)
|
||||
#define QD(q) ((int64_t)q)
|
||||
|
||||
#define KZU(z) ZU(z##ULL)
|
||||
#define KZD(z) ZD(z##LL)
|
||||
#define KQU(q) QU(q##ULL)
|
||||
#define KQD(q) QD(q##LL)
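As a quick illustration of why the K* variants exist (they paste an integer suffix onto a literal so large constants keep the intended width; the nstime header later in this commit uses the same trick for NSTIME_SEC_MAX), here is a standalone sketch that copies the two relevant macros rather than including this header:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the helpers above, for a self-contained example. */
#define QU(q)	((uint64_t)q)
#define KQU(q)	QU(q##ULL)

/* The ULL suffix keeps this literal 64-bit even where plain int is 32-bit. */
#define EXAMPLE_SEC_MAX	KQU(18446744072)

int main(void) {
	printf("%llu\n", (unsigned long long)EXAMPLE_SEC_MAX);
	return 0;
}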
|
||||
|
||||
#ifndef __DECONST
|
||||
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
|
||||
#endif
|
||||
|
||||
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
|
||||
# define restrict
|
||||
#endif
|
||||
|
||||
/* Various function pointers are static and immutable except during testing. */
|
||||
#ifdef JEMALLOC_JET
|
||||
# define JET_MUTABLE
|
||||
#else
|
||||
# define JET_MUTABLE const
|
||||
#endif
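The JET_MUTABLE idiom above is easiest to see with a toy function pointer; this is a sketch with made-up names, not something defined by jemalloc:

#include <stddef.h>

typedef void (junk_fill_t)(void *, size_t);

static void
junk_fill_default(void *ptr, size_t size) {
	/* Pretend default implementation. */
	(void)ptr;
	(void)size;
}

/*
 * const (hence not patchable) in normal builds; mutable when JEMALLOC_JET is
 * defined, so unit tests can swap in an instrumented version.
 */
junk_fill_t *JET_MUTABLE junk_fill = junk_fill_default;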
|
||||
|
||||
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
|
||||
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
|
||||
|
||||
#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
|
||||
&& defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
|
||||
#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
|
||||
#else
|
||||
#define JEMALLOC_FALLTHROUGH /* falls through */
|
||||
#endif
|
||||
|
||||
/* Diagnostic suppression macros */
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
|
||||
# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
/* #pragma GCC diagnostic first appeared in gcc 4.6. */
|
||||
#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
|
||||
(__GNUC_MINOR__ > 5)))) || defined(__clang__)
|
||||
/*
|
||||
* The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
|
||||
* diagnostic suppression macros and should not be used anywhere else.
|
||||
*/
|
||||
# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
|
||||
# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
|
||||
JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
|
||||
|
||||
/*
|
||||
* The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
|
||||
* all clang versions up to version 7 (currently trunk, unreleased). This macro
|
||||
* suppresses the warning for the affected compiler versions only.
|
||||
*/
|
||||
# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
|
||||
defined(__clang__)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
|
||||
# else
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# endif
|
||||
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
|
||||
# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
|
||||
# else
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# endif
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
|
||||
JEMALLOC_DIAGNOSTIC_PUSH \
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
|
||||
#else
|
||||
# define JEMALLOC_DIAGNOSTIC_PUSH
|
||||
# define JEMALLOC_DIAGNOSTIC_POP
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
|
||||
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
|
||||
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
#endif
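Putting the pieces together, a sketch of how the push/ignore/pop helpers are typically wrapped around a single definition (the function here is invented for illustration):

JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
/* "x >= 0" is always true for unsigned types, which trips -Wtype-limits. */
static inline int
example_nonnegative(unsigned x) {
	return x >= 0;
}
JEMALLOC_DIAGNOSTIC_POP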
|
||||
|
||||
/*
|
||||
* Disables spurious diagnostics for all headers. Since these headers are not
|
||||
* included by users directly, it does not affect their diagnostic settings.
|
||||
*/
|
||||
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MACROS_H */
|
||||
114
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
vendored
Normal file
114
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_TYPES_H
|
||||
|
||||
#include "jemalloc/internal/quantum.h"
|
||||
|
||||
/* Page size index type. */
|
||||
typedef unsigned pszind_t;
|
||||
|
||||
/* Size class index type. */
|
||||
typedef unsigned szind_t;
|
||||
|
||||
/* Processor / core id type. */
|
||||
typedef int malloc_cpuid_t;
|
||||
|
||||
/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
|
||||
#define MALLOCX_ARENA_BITS 12
|
||||
#define MALLOCX_TCACHE_BITS 12
|
||||
#define MALLOCX_LG_ALIGN_BITS 6
|
||||
#define MALLOCX_ARENA_SHIFT 20
|
||||
#define MALLOCX_TCACHE_SHIFT 8
|
||||
#define MALLOCX_ARENA_MASK \
|
||||
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
|
||||
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
|
||||
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
|
||||
#define MALLOCX_TCACHE_MASK \
|
||||
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
|
||||
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
|
||||
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
|
||||
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
|
||||
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
|
||||
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
|
||||
#define MALLOCX_ALIGN_GET(flags) \
|
||||
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
|
||||
#define MALLOCX_ZERO_GET(flags) \
|
||||
((bool)(flags & MALLOCX_ZERO))
|
||||
|
||||
#define MALLOCX_TCACHE_GET(flags) \
|
||||
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
|
||||
#define MALLOCX_ARENA_GET(flags) \
|
||||
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
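To make the bit diagram above concrete, here is a standalone round trip through the same layout. The EX_* encoders are local to this sketch (modeled on the public MALLOCX_LG_ALIGN/MALLOCX_TCACHE/MALLOCX_ARENA macros); treat it as an illustration of the layout, not the canonical definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Encoders for "aaaaaaaa aaaatttt tttttttt 0znnnnnn" (illustration only). */
#define EX_LG_ALIGN(la)   ((int)(la))
#define EX_TCACHE(tc)     ((int)(((tc) + 2) << 8))
#define EX_ARENA(a)       ((int)(((a) + 1) << 20))

int main(void) {
	int flags = EX_LG_ALIGN(4) | EX_TCACHE(3) | EX_ARENA(7);
	/* Decode with the same shifts/masks as the *_GET macros above. */
	size_t align = (size_t)1 << (flags & ((1 << 6) - 1));              /* 16 */
	unsigned tcache = (((unsigned)flags >> 8) & ((1u << 12) - 1)) - 2; /* 3  */
	unsigned arena = ((unsigned)flags >> 20) - 1;                      /* 7  */
	printf("align=%zu tcache=%u arena=%u\n", align, tcache, arena);
	return 0;
}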
|
||||
|
||||
/* Smallest size class to support. */
|
||||
#define TINY_MIN (1U << LG_TINY_MIN)
|
||||
|
||||
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
|
||||
#define LONG_MASK (LONG - 1)
|
||||
|
||||
/* Return the smallest long multiple that is >= a. */
|
||||
#define LONG_CEILING(a) \
|
||||
(((a) + LONG_MASK) & ~LONG_MASK)
|
||||
|
||||
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
|
||||
#define PTR_MASK (SIZEOF_PTR - 1)
|
||||
|
||||
/* Return the smallest (void *) multiple that is >= a. */
|
||||
#define PTR_CEILING(a) \
|
||||
(((a) + PTR_MASK) & ~PTR_MASK)
|
||||
|
||||
/*
|
||||
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
|
||||
* In addition, this controls the spacing of cacheline-spaced size classes.
|
||||
*
|
||||
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
|
||||
* only handle raw constants.
|
||||
*/
|
||||
#define LG_CACHELINE 6
|
||||
#define CACHELINE 64
|
||||
#define CACHELINE_MASK (CACHELINE - 1)
|
||||
|
||||
/* Return the smallest cacheline multiple that is >= s. */
|
||||
#define CACHELINE_CEILING(s) \
|
||||
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
|
||||
|
||||
/* Return the nearest aligned address at or below a. */
|
||||
#define ALIGNMENT_ADDR2BASE(a, alignment) \
|
||||
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
|
||||
|
||||
/* Return the offset between a and the nearest aligned address at or below a. */
|
||||
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
|
||||
((size_t)((uintptr_t)(a) & (alignment - 1)))
|
||||
|
||||
/* Return the smallest alignment multiple that is >= s. */
|
||||
#define ALIGNMENT_CEILING(s, alignment) \
|
||||
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
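A small standalone check of the address arithmetic above; the EX_* macro copies drop the jemalloc-specific naming but keep the same expressions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone copies of the helpers above, exercised on one sample address. */
#define EX_ADDR2BASE(a, alignment)   ((uintptr_t)(a) & ((~(uintptr_t)(alignment)) + 1))
#define EX_ADDR2OFFSET(a, alignment) ((size_t)((uintptr_t)(a) & ((alignment) - 1)))
#define EX_CEILING(s, alignment)     (((s) + ((alignment) - 1)) & ((~(uintptr_t)(alignment)) + 1))

int main(void) {
	uintptr_t a = 0x1007;
	/* For 64-byte alignment: base 0x1000, offset 7, ceiling 0x1040. */
	printf("base=%#lx offset=%zu ceiling=%#lx\n",
	    (unsigned long)EX_ADDR2BASE(a, 64),
	    EX_ADDR2OFFSET(a, 64),
	    (unsigned long)EX_CEILING(a, 64));
	return 0;
}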
|
||||
|
||||
/* Declare a variable-length array. */
|
||||
#if __STDC_VERSION__ < 199901L
|
||||
# ifdef _MSC_VER
|
||||
# include <malloc.h>
|
||||
# define alloca _alloca
|
||||
# else
|
||||
# ifdef JEMALLOC_HAS_ALLOCA_H
|
||||
# include <alloca.h>
|
||||
# else
|
||||
# include <stdlib.h>
|
||||
# endif
|
||||
# endif
|
||||
# define VARIABLE_ARRAY(type, name, count) \
|
||||
type *name = alloca(sizeof(type) * (count))
|
||||
#else
|
||||
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TYPES_H */
|
||||
213
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
vendored
Normal file
213
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
#ifndef JEMALLOC_PREAMBLE_H
|
||||
#define JEMALLOC_PREAMBLE_H
|
||||
|
||||
#include "jemalloc_internal_defs.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_decls.h"
|
||||
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
#include <sys/ktrace.h>
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_NO_DEMANGLE
|
||||
#ifdef JEMALLOC_JET
|
||||
# undef JEMALLOC_IS_MALLOC
|
||||
# define JEMALLOC_N(n) jet_##n
|
||||
# include "jemalloc/internal/public_namespace.h"
|
||||
# define JEMALLOC_NO_RENAME
|
||||
# include "../jemalloc.h"
|
||||
# undef JEMALLOC_NO_RENAME
|
||||
#else
|
||||
# define JEMALLOC_N(n) je_##n
|
||||
# include "../jemalloc.h"
|
||||
#endif
|
||||
|
||||
#if defined(JEMALLOC_OSATOMIC)
|
||||
#include <libkern/OSAtomic.h>
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_ZONE
|
||||
#include <mach/mach_error.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/vm_map.h>
|
||||
#endif
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_macros.h"
|
||||
|
||||
/*
|
||||
* Note that the ordering matters here; the hook itself is name-mangled. We
|
||||
* want the inclusion of hooks to happen early, so that we hook as much as
|
||||
* possible.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
# ifndef JEMALLOC_JET
|
||||
# include "jemalloc/internal/private_namespace.h"
|
||||
# else
|
||||
# include "jemalloc/internal/private_namespace_jet.h"
|
||||
# endif
|
||||
#endif
|
||||
#include "jemalloc/internal/test_hooks.h"
|
||||
|
||||
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
|
||||
# define JEMALLOC_MADV_FREE 8
|
||||
#endif
|
||||
|
||||
static const bool config_debug =
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_madvise_huge =
|
||||
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_fill =
|
||||
#ifdef JEMALLOC_FILL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_lazy_lock =
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
|
||||
static const bool config_prof =
|
||||
#ifdef JEMALLOC_PROF
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libgcc =
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libunwind =
|
||||
#ifdef JEMALLOC_PROF_LIBUNWIND
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool maps_coalesce =
|
||||
#ifdef JEMALLOC_MAPS_COALESCE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_stats =
|
||||
#ifdef JEMALLOC_STATS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_tls =
|
||||
#ifdef JEMALLOC_TLS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_utrace =
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_xmalloc =
|
||||
#ifdef JEMALLOC_XMALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_cache_oblivious =
|
||||
#ifdef JEMALLOC_CACHE_OBLIVIOUS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, for jemalloc development use only at the moment. See the note
|
||||
* in jemalloc/internal/log.h.
|
||||
*/
|
||||
static const bool config_log =
|
||||
#ifdef JEMALLOC_LOG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Are extra safety checks enabled; things like checking the size of sized
|
||||
* deallocations, double-frees, etc.
|
||||
*/
|
||||
static const bool config_opt_safety_checks =
|
||||
#ifdef JEMALLOC_OPT_SAFETY_CHECKS
|
||||
true
|
||||
#elif defined(JEMALLOC_DEBUG)
|
||||
/*
|
||||
* This lets us only guard safety checks by one flag instead of two; fast
|
||||
* checks can guard solely by config_opt_safety_checks and run in debug mode
|
||||
* too.
|
||||
*/
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
|
||||
/* Currently percpu_arena depends on sched_getcpu. */
|
||||
#define JEMALLOC_PERCPU_ARENA
|
||||
#endif
|
||||
static const bool have_percpu_arena =
|
||||
#ifdef JEMALLOC_PERCPU_ARENA
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, and not recommended; the application should take full
|
||||
* responsibility for tracking provenance.
|
||||
*/
|
||||
static const bool force_ivsalloc =
|
||||
#ifdef JEMALLOC_FORCE_IVSALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_background_thread =
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#endif /* JEMALLOC_PREAMBLE_H */
|
||||
179
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
vendored
Normal file
179
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
vendored
Normal file
@@ -0,0 +1,179 @@
|
||||
#ifndef JEMALLOC_PREAMBLE_H
|
||||
#define JEMALLOC_PREAMBLE_H
|
||||
|
||||
#include "jemalloc_internal_defs.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_decls.h"
|
||||
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
#include <sys/ktrace.h>
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_NO_DEMANGLE
|
||||
#ifdef JEMALLOC_JET
|
||||
# undef JEMALLOC_IS_MALLOC
|
||||
# define JEMALLOC_N(n) jet_##n
|
||||
# include "jemalloc/internal/public_namespace.h"
|
||||
# define JEMALLOC_NO_RENAME
|
||||
# include "../jemalloc@install_suffix@.h"
|
||||
# undef JEMALLOC_NO_RENAME
|
||||
#else
|
||||
# define JEMALLOC_N(n) @private_namespace@##n
|
||||
# include "../jemalloc@install_suffix@.h"
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
|
||||
#include <libkern/OSAtomic.h>
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_ZONE
|
||||
#include <mach/mach_error.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/vm_map.h>
|
||||
#endif
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_macros.h"
|
||||
|
||||
/*
|
||||
* Note that the ordering matters here; the hook itself is name-mangled. We
|
||||
* want the inclusion of hooks to happen early, so that we hook as much as
|
||||
* possible.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
|
||||
# ifndef JEMALLOC_JET
|
||||
# include "jemalloc/internal/private_namespace.h"
|
||||
# else
|
||||
# include "jemalloc/internal/private_namespace_jet.h"
|
||||
# endif
|
||||
#endif
|
||||
#include "jemalloc/internal/hooks.h"
|
||||
|
||||
static const bool config_debug =
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_fill =
|
||||
#ifdef JEMALLOC_FILL
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_lazy_lock =
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
|
||||
static const bool config_prof =
|
||||
#ifdef JEMALLOC_PROF
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libgcc =
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_prof_libunwind =
|
||||
#ifdef JEMALLOC_PROF_LIBUNWIND
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool maps_coalesce =
|
||||
#ifdef JEMALLOC_MAPS_COALESCE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_stats =
|
||||
#ifdef JEMALLOC_STATS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_thp =
|
||||
#ifdef JEMALLOC_THP
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_tls =
|
||||
#ifdef JEMALLOC_TLS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_utrace =
|
||||
#ifdef JEMALLOC_UTRACE
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_xmalloc =
|
||||
#ifdef JEMALLOC_XMALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_cache_oblivious =
|
||||
#ifdef JEMALLOC_CACHE_OBLIVIOUS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
|
||||
/* Currently percpu_arena depends on sched_getcpu. */
|
||||
#define JEMALLOC_PERCPU_ARENA
|
||||
#endif
|
||||
static const bool have_percpu_arena =
|
||||
#ifdef JEMALLOC_PERCPU_ARENA
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
/*
|
||||
* Undocumented, and not recommended; the application should take full
|
||||
* responsibility for tracking provenance.
|
||||
*/
|
||||
static const bool force_ivsalloc =
|
||||
#ifdef JEMALLOC_FORCE_IVSALLOC
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool have_background_thread =
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
#endif /* JEMALLOC_PREAMBLE_H */
|
||||
32
deps/jemalloc/include/jemalloc/internal/large_externs.h
vendored
Normal file
32
deps/jemalloc/include/jemalloc/internal/large_externs.h
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/hook.h"
|
||||
|
||||
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
|
||||
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
|
||||
bool zero);
|
||||
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
|
||||
size_t usize_max, bool zero);
|
||||
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
|
||||
size_t alignment, bool zero, tcache_t *tcache,
|
||||
hook_ralloc_args_t *hook_args);
|
||||
|
||||
typedef void (large_dalloc_junk_t)(void *, size_t);
|
||||
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
|
||||
|
||||
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
|
||||
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
|
||||
|
||||
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
|
||||
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
|
||||
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
|
||||
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
|
||||
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
|
||||
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
|
||||
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
|
||||
|
||||
nstime_t large_prof_alloc_time_get(const extent_t *extent);
|
||||
void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
|
||||
115
deps/jemalloc/include/jemalloc/internal/log.h
vendored
Normal file
115
deps/jemalloc/include/jemalloc/internal/log.h
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
#ifndef JEMALLOC_INTERNAL_LOG_H
|
||||
#define JEMALLOC_INTERNAL_LOG_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
#ifdef JEMALLOC_LOG
|
||||
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
|
||||
#else
|
||||
# define JEMALLOC_LOG_VAR_BUFSIZE 1
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_LOG_BUFSIZE 4096
|
||||
|
||||
/*
 * The log malloc_conf option is a '|'-delimited list of log_var name segments
 * which should be logged. The names are themselves hierarchical, with '.' as
 * the delimiter (a "segment" is just a prefix in the log namespace). So, if
 * you have:
 *
 * log("arena", "log msg for arena"); // 1
 * log("arena.a", "log msg for arena.a"); // 2
 * log("arena.b", "log msg for arena.b"); // 3
 * log("arena.a.a", "log msg for arena.a.a"); // 4
 * log("extent.a", "log msg for extent.a"); // 5
 * log("extent.b", "log msg for extent.b"); // 6
 *
 * and your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
 * 6 will print at runtime. You can enable logging from all log vars by
 * writing "log=.".
 *
 * None of this should be regarded as a stable API right now. It's intended
 * as a debugging interface, to let us keep around some of our printf-debugging
 * statements.
 */
|
||||
|
||||
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
|
||||
extern atomic_b_t log_init_done;
|
||||
|
||||
typedef struct log_var_s log_var_t;
|
||||
struct log_var_s {
|
||||
/*
|
||||
* Lowest bit is "inited", second lowest is "enabled". Putting them in
|
||||
* a single word lets us avoid any fences on weak architectures.
|
||||
*/
|
||||
atomic_u_t state;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
#define LOG_NOT_INITIALIZED 0U
|
||||
#define LOG_INITIALIZED_NOT_ENABLED 1U
|
||||
#define LOG_ENABLED 2U
|
||||
|
||||
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
|
||||
|
||||
/*
|
||||
* Returns the value we should assume for state (which is not necessarily
|
||||
* accurate; if logging is done before logging has finished initializing, then
|
||||
* we default to doing the safe thing by logging everything).
|
||||
*/
|
||||
unsigned log_var_update_state(log_var_t *log_var);
|
||||
|
||||
/* We factor out the metadata management to allow us to test more easily. */
|
||||
#define log_do_begin(log_var) \
|
||||
if (config_log) { \
|
||||
unsigned log_state = atomic_load_u(&(log_var).state, \
|
||||
ATOMIC_RELAXED); \
|
||||
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
|
||||
log_state = log_var_update_state(&(log_var)); \
|
||||
assert(log_state != LOG_NOT_INITIALIZED); \
|
||||
} \
|
||||
if (log_state == LOG_ENABLED) { \
|
||||
{
|
||||
/* User code executes here. */
|
||||
#define log_do_end(log_var) \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
/*
|
||||
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
|
||||
* preprocessing. To work around this, we take all potential extra arguments in
|
||||
* a var-args functions. Since a varargs macro needs at least one argument in
|
||||
* the "...", we accept the format string there, and require that the first
|
||||
* argument in this "..." is a const char *.
|
||||
*/
|
||||
static inline void
|
||||
log_impl_varargs(const char *name, ...) {
|
||||
char buf[JEMALLOC_LOG_BUFSIZE];
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, name);
|
||||
const char *format = va_arg(ap, const char *);
|
||||
size_t dst_offset = 0;
|
||||
dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
|
||||
dst_offset += malloc_vsnprintf(buf + dst_offset,
|
||||
JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
|
||||
dst_offset += malloc_snprintf(buf + dst_offset,
|
||||
JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
|
||||
va_end(ap);
|
||||
|
||||
malloc_write(buf);
|
||||
}
|
||||
|
||||
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
|
||||
#define LOG(log_var_str, ...) \
|
||||
do { \
|
||||
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
|
||||
log_do_begin(log_var) \
|
||||
log_impl_varargs((log_var).name, __VA_ARGS__); \
|
||||
log_do_end(log_var) \
|
||||
} while (0)
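A sketch of a call site (the log var name "extent.split_example" and the wrapper function are made up); with a malloc_conf containing "log=extent", this message would be emitted at runtime:

static inline void
log_split_example(size_t sz) {
	LOG("extent.split_example", "splitting an extent of size %zu", sz);
}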
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_LOG_H */
|
||||
102
deps/jemalloc/include/jemalloc/internal/malloc_io.h
vendored
Normal file
102
deps/jemalloc/include/jemalloc/internal/malloc_io.h
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
|
||||
#define JEMALLOC_INTERNAL_MALLOC_IO_H
|
||||
|
||||
#ifdef _WIN32
|
||||
# ifdef _WIN64
|
||||
# define FMT64_PREFIX "ll"
|
||||
# define FMTPTR_PREFIX "ll"
|
||||
# else
|
||||
# define FMT64_PREFIX "ll"
|
||||
# define FMTPTR_PREFIX ""
|
||||
# endif
|
||||
# define FMTd32 "d"
|
||||
# define FMTu32 "u"
|
||||
# define FMTx32 "x"
|
||||
# define FMTd64 FMT64_PREFIX "d"
|
||||
# define FMTu64 FMT64_PREFIX "u"
|
||||
# define FMTx64 FMT64_PREFIX "x"
|
||||
# define FMTdPTR FMTPTR_PREFIX "d"
|
||||
# define FMTuPTR FMTPTR_PREFIX "u"
|
||||
# define FMTxPTR FMTPTR_PREFIX "x"
|
||||
#else
|
||||
# include <inttypes.h>
|
||||
# define FMTd32 PRId32
|
||||
# define FMTu32 PRIu32
|
||||
# define FMTx32 PRIx32
|
||||
# define FMTd64 PRId64
|
||||
# define FMTu64 PRIu64
|
||||
# define FMTx64 PRIx64
|
||||
# define FMTdPTR PRIdPTR
|
||||
# define FMTuPTR PRIuPTR
|
||||
# define FMTxPTR PRIxPTR
|
||||
#endif
|
||||
|
||||
/* Size of stack-allocated buffer passed to buferror(). */
|
||||
#define BUFERROR_BUF 64
|
||||
|
||||
/*
|
||||
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
|
||||
* large enough for all possible uses within jemalloc.
|
||||
*/
|
||||
#define MALLOC_PRINTF_BUFSIZE 4096
|
||||
|
||||
int buferror(int err, char *buf, size_t buflen);
|
||||
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
|
||||
int base);
|
||||
void malloc_write(const char *s);
|
||||
|
||||
/*
|
||||
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
|
||||
* point math.
|
||||
*/
|
||||
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
|
||||
va_list ap);
|
||||
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
|
||||
JEMALLOC_FORMAT_PRINTF(3, 4);
|
||||
/*
|
||||
* The caller can set write_cb to null to choose to print with the
|
||||
* je_malloc_message hook.
|
||||
*/
|
||||
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, va_list ap);
|
||||
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
|
||||
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
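For example, a caller that wants the default je_malloc_message routing can simply pass a NULL callback, per the comment above (sketch, function name invented):

static inline void
report_example(size_t nbytes) {
	malloc_cprintf(NULL, NULL, "<jemalloc>: example report, %zu bytes\n",
	    nbytes);
}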
|
||||
|
||||
static inline ssize_t
|
||||
malloc_write_fd(int fd, const void *buf, size_t count) {
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
|
||||
/*
|
||||
* Use syscall(2) rather than write(2) when possible in order to avoid
|
||||
* the possibility of memory allocation within libc. This is necessary
|
||||
* on FreeBSD; most operating systems do not have this problem though.
|
||||
*
|
||||
* syscall() returns long or int, depending on platform, so capture the
|
||||
* result in the widest plausible type to avoid compiler warnings.
|
||||
*/
|
||||
long result = syscall(SYS_write, fd, buf, count);
|
||||
#else
|
||||
ssize_t result = (ssize_t)write(fd, buf,
|
||||
#ifdef _WIN32
|
||||
(unsigned int)
|
||||
#endif
|
||||
count);
|
||||
#endif
|
||||
return (ssize_t)result;
|
||||
}
|
||||
|
||||
static inline ssize_t
|
||||
malloc_read_fd(int fd, void *buf, size_t count) {
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
|
||||
long result = syscall(SYS_read, fd, buf, count);
|
||||
#else
|
||||
ssize_t result = read(fd, buf,
|
||||
#ifdef _WIN32
|
||||
(unsigned int)
|
||||
#endif
|
||||
count);
|
||||
#endif
|
||||
return (ssize_t)result;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
|
||||
115
deps/jemalloc/include/jemalloc/internal/mb.h
vendored
Normal file
115
deps/jemalloc/include/jemalloc/internal/mb.h
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
void mb_write(void);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
|
||||
#ifdef __i386__
|
||||
/*
|
||||
* According to the Intel Architecture Software Developer's Manual, current
|
||||
* processors execute instructions in order from the perspective of other
|
||||
* processors in a multiprocessor system, but 1) Intel reserves the right to
|
||||
* change that, and 2) the compiler's optimizer could re-order instructions if
|
||||
* there weren't some form of barrier. Therefore, even if running on an
|
||||
* architecture that does not need memory barriers (everything through at least
|
||||
* i686), an "optimizer barrier" is necessary.
|
||||
*/
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
|
||||
# if 0
|
||||
/* This is a true memory barrier. */
|
||||
asm volatile ("pusha;"
|
||||
"xor %%eax,%%eax;"
|
||||
"cpuid;"
|
||||
"popa;"
|
||||
: /* Outputs. */
|
||||
: /* Inputs. */
|
||||
: "memory" /* Clobbers. */
|
||||
);
|
||||
#else
|
||||
/*
|
||||
* This is hopefully enough to keep the compiler from reordering
|
||||
* instructions around this one.
|
||||
*/
|
||||
asm volatile ("nop;"
|
||||
: /* Outputs. */
|
||||
: /* Inputs. */
|
||||
: "memory" /* Clobbers. */
|
||||
);
|
||||
#endif
|
||||
}
|
||||
#elif (defined(__amd64__) || defined(__x86_64__))
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
|
||||
asm volatile ("sfence"
|
||||
: /* Outputs. */
|
||||
: /* Inputs. */
|
||||
: "memory" /* Clobbers. */
|
||||
);
|
||||
}
|
||||
#elif defined(__powerpc__)
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
|
||||
asm volatile ("eieio"
|
||||
: /* Outputs. */
|
||||
: /* Inputs. */
|
||||
: "memory" /* Clobbers. */
|
||||
);
|
||||
}
|
||||
#elif defined(__sparc64__)
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
|
||||
asm volatile ("membar #StoreStore"
|
||||
: /* Outputs. */
|
||||
: /* Inputs. */
|
||||
: "memory" /* Clobbers. */
|
||||
);
|
||||
}
|
||||
#elif defined(__tile__)
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
|
||||
__sync_synchronize();
|
||||
}
|
||||
#else
|
||||
/*
|
||||
* This is much slower than a simple memory barrier, but the semantics of mutex
|
||||
* unlock make this work.
|
||||
*/
|
||||
JEMALLOC_INLINE void
|
||||
mb_write(void)
|
||||
{
|
||||
malloc_mutex_t mtx;
|
||||
|
||||
malloc_mutex_init(&mtx);
|
||||
malloc_mutex_lock(&mtx);
|
||||
malloc_mutex_unlock(&mtx);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
288
deps/jemalloc/include/jemalloc/internal/mutex.h
vendored
Normal file
288
deps/jemalloc/include/jemalloc/internal/mutex.h
vendored
Normal file
@@ -0,0 +1,288 @@
|
||||
#ifndef JEMALLOC_INTERNAL_MUTEX_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
typedef enum {
|
||||
/* Can only acquire one mutex of a given witness rank at a time. */
|
||||
malloc_mutex_rank_exclusive,
|
||||
/*
|
||||
* Can acquire multiple mutexes of the same witness rank, but in
|
||||
* address-ascending order only.
|
||||
*/
|
||||
malloc_mutex_address_ordered
|
||||
} malloc_mutex_lock_order_t;
|
||||
|
||||
typedef struct malloc_mutex_s malloc_mutex_t;
|
||||
struct malloc_mutex_s {
|
||||
union {
|
||||
struct {
|
||||
/*
|
||||
* prof_data is defined first to reduce cacheline
|
||||
* bouncing: the data is not touched by the mutex holder
|
||||
			 * during unlocking, while it might be modified by
|
||||
* contenders. Having it before the mutex itself could
|
||||
* avoid prefetching a modified cacheline (for the
|
||||
* unlocking thread).
|
||||
*/
|
||||
mutex_prof_data_t prof_data;
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
SRWLOCK lock;
|
||||
# else
|
||||
CRITICAL_SECTION lock;
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
os_unfair_lock lock;
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
pthread_mutex_t lock;
|
||||
malloc_mutex_t *postponed_next;
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
#endif
|
||||
/*
|
||||
* Hint flag to avoid exclusive cache line contention
|
||||
* during spin waiting
|
||||
*/
|
||||
atomic_b_t locked;
|
||||
};
|
||||
/*
|
||||
* We only touch witness when configured w/ debug. However we
|
||||
* keep the field in a union when !debug so that we don't have
|
||||
		 * to pollute the code base with #ifdefs, while avoiding the
|
||||
* memory cost.
|
||||
*/
|
||||
#if !defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
#if defined(JEMALLOC_DEBUG)
|
||||
witness_t witness;
|
||||
malloc_mutex_lock_order_t lock_order;
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Based on benchmark results, a fixed spin with this amount of retries works
|
||||
* well for our critical sections.
|
||||
*/
|
||||
#define MALLOC_MUTEX_MAX_SPIN 250
|
||||
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
|
||||
# else
|
||||
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
|
||||
#else
|
||||
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
|
||||
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
|
||||
#endif
|
||||
|
||||
#define LOCK_PROF_DATA_INITIALIZER \
|
||||
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
|
||||
ATOMIC_INIT(0), 0, NULL, 0}
|
||||
|
||||
#ifdef _WIN32
|
||||
# define MALLOC_MUTEX_INITIALIZER
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# if (defined(JEMALLOC_DEBUG))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
|
||||
#else
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
extern bool isthreaded;
|
||||
#else
|
||||
# undef isthreaded /* Undo private_namespace.h definition. */
|
||||
# define isthreaded true
|
||||
#endif
|
||||
|
||||
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
|
||||
witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
|
||||
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
bool malloc_mutex_boot(void);
|
||||
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
|
||||
|
||||
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
|
||||
|
||||
static inline void
|
||||
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
|
||||
MALLOC_MUTEX_LOCK(mutex);
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
|
||||
return MALLOC_MUTEX_TRYLOCK(mutex);
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
if (config_stats) {
|
||||
mutex_prof_data_t *data = &mutex->prof_data;
|
||||
data->n_lock_ops++;
|
||||
if (data->prev_owner != tsdn) {
|
||||
data->prev_owner = tsdn;
|
||||
data->n_owner_switches++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Trylock: return false if the lock is successfully acquired. */
|
||||
static inline bool
|
||||
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
return true;
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
|
||||
return false;
|
||||
}
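Note the inverted convention documented above: false means the lock was acquired. A sketch of a typical caller (the function and mutex names are made up):

static inline bool
try_do_work(tsdn_t *tsdn, malloc_mutex_t *work_mtx) {
	if (malloc_mutex_trylock(tsdn, work_mtx)) {
		/* Lock is busy; caller should fall back to a slow path. */
		return false;
	}
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, work_mtx);
	return true;
}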
|
||||
|
||||
/* Aggregate lock prof data. */
|
||||
static inline void
|
||||
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
|
||||
nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
|
||||
if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
|
||||
nstime_copy(&sum->max_wait_time, &data->max_wait_time);
|
||||
}
|
||||
|
||||
sum->n_wait_times += data->n_wait_times;
|
||||
sum->n_spin_acquired += data->n_spin_acquired;
|
||||
|
||||
if (sum->max_n_thds < data->max_n_thds) {
|
||||
sum->max_n_thds = data->max_n_thds;
|
||||
}
|
||||
uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
|
||||
&data->n_waiting_thds, ATOMIC_RELAXED);
|
||||
atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
|
||||
ATOMIC_RELAXED);
|
||||
sum->n_owner_switches += data->n_owner_switches;
|
||||
sum->n_lock_ops += data->n_lock_ops;
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
if (malloc_mutex_trylock_final(mutex)) {
|
||||
malloc_mutex_lock_slow(mutex);
|
||||
atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
|
||||
}
|
||||
mutex_owner_stats_update(tsdn, mutex);
|
||||
}
|
||||
witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
|
||||
witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
if (isthreaded) {
|
||||
MALLOC_MUTEX_UNLOCK(mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
|
||||
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
|
||||
}
|
||||
|
||||
/* Copy the prof data from mutex for processing. */
|
||||
static inline void
|
||||
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *source = &mutex->prof_data;
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
|
||||
/*
|
||||
* Not *really* allowed (we shouldn't be doing non-atomic loads of
|
||||
* atomic data), but the mutex protection makes this safe, and writing
|
||||
* a member-for-member copy is tedious for this situation.
|
||||
*/
|
||||
*data = *source;
|
||||
/* n_wait_thds is not reported (modified w/o locking). */
|
||||
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline void
|
||||
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
|
||||
malloc_mutex_t *mutex) {
|
||||
mutex_prof_data_t *source = &mutex->prof_data;
|
||||
/* Can only read holding the mutex. */
|
||||
malloc_mutex_assert_owner(tsdn, mutex);
|
||||
|
||||
nstime_add(&data->tot_wait_time, &source->tot_wait_time);
|
||||
if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
|
||||
nstime_copy(&data->max_wait_time, &source->max_wait_time);
|
||||
}
|
||||
data->n_wait_times += source->n_wait_times;
|
||||
data->n_spin_acquired += source->n_spin_acquired;
|
||||
if (data->max_n_thds < source->max_n_thds) {
|
||||
data->max_n_thds = source->max_n_thds;
|
||||
}
|
||||
/* n_wait_thds is not reported. */
|
||||
atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
|
||||
data->n_owner_switches += source->n_owner_switches;
|
||||
data->n_lock_ops += source->n_lock_ops;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
|
||||
94
deps/jemalloc/include/jemalloc/internal/mutex_pool.h
vendored
Normal file
94
deps/jemalloc/include/jemalloc/internal/mutex_pool.h
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_POOL_H
|
||||
|
||||
#include "jemalloc/internal/hash.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
/* We do mod reductions by this value, so it should be kept a power of 2. */
|
||||
#define MUTEX_POOL_SIZE 256
|
||||
|
||||
typedef struct mutex_pool_s mutex_pool_t;
|
||||
struct mutex_pool_s {
|
||||
malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
|
||||
};
|
||||
|
||||
bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
|
||||
|
||||
/* Internal helper - not meant to be called outside this module. */
|
||||
static inline malloc_mutex_t *
|
||||
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
|
||||
size_t hash_result[2];
|
||||
hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
|
||||
return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
|
||||
for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
|
||||
malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time. You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */
|
||||
|
||||
static inline void
|
||||
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
|
||||
mutex_pool_assert_not_held(tsdn, pool);
|
||||
|
||||
malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
|
||||
malloc_mutex_lock(tsdn, mutex);
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
|
||||
malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
|
||||
malloc_mutex_unlock(tsdn, mutex);
|
||||
|
||||
mutex_pool_assert_not_held(tsdn, pool);
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
|
||||
uintptr_t key2) {
|
||||
mutex_pool_assert_not_held(tsdn, pool);
|
||||
|
||||
malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
|
||||
malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
|
||||
if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
|
||||
malloc_mutex_lock(tsdn, mutex1);
|
||||
malloc_mutex_lock(tsdn, mutex2);
|
||||
} else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
|
||||
malloc_mutex_lock(tsdn, mutex1);
|
||||
} else {
|
||||
malloc_mutex_lock(tsdn, mutex2);
|
||||
malloc_mutex_lock(tsdn, mutex1);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
|
||||
uintptr_t key2) {
|
||||
malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
|
||||
malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
|
||||
if (mutex1 == mutex2) {
|
||||
malloc_mutex_unlock(tsdn, mutex1);
|
||||
} else {
|
||||
malloc_mutex_unlock(tsdn, mutex1);
|
||||
malloc_mutex_unlock(tsdn, mutex2);
|
||||
}
|
||||
|
||||
mutex_pool_assert_not_held(tsdn, pool);
|
||||
}
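A sketch of the all-at-once protocol described above: both keys are locked in a single call and released in a single call (the function here is invented; the keys are just the objects' addresses):

static inline void
link_two_objects(tsdn_t *tsdn, mutex_pool_t *pool, void *a, void *b) {
	mutex_pool_lock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
	/* ... mutate both objects while both pool mutexes are held ... */
	mutex_pool_unlock2(tsdn, pool, (uintptr_t)a, (uintptr_t)b);
}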
|
||||
|
||||
static inline void
|
||||
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
|
||||
malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
|
||||
108
deps/jemalloc/include/jemalloc/internal/mutex_prof.h
vendored
Normal file
108
deps/jemalloc/include/jemalloc/internal/mutex_prof.h
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
|
||||
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
#include "jemalloc/internal/tsd_types.h"
|
||||
|
||||
#define MUTEX_PROF_GLOBAL_MUTEXES \
|
||||
OP(background_thread) \
|
||||
OP(ctl) \
|
||||
OP(prof)
|
||||
|
||||
typedef enum {
|
||||
#define OP(mtx) global_prof_mutex_##mtx,
|
||||
MUTEX_PROF_GLOBAL_MUTEXES
|
||||
#undef OP
|
||||
mutex_prof_num_global_mutexes
|
||||
} mutex_prof_global_ind_t;
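The OP list above is a conventional X-macro; the same list can be reused to stamp out parallel tables, e.g. a table of printable names (sketch, not part of jemalloc):

static const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
};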
|
||||
|
||||
#define MUTEX_PROF_ARENA_MUTEXES \
|
||||
OP(large) \
|
||||
OP(extent_avail) \
|
||||
OP(extents_dirty) \
|
||||
OP(extents_muzzy) \
|
||||
OP(extents_retained) \
|
||||
OP(decay_dirty) \
|
||||
OP(decay_muzzy) \
|
||||
OP(base) \
|
||||
OP(tcache_list)
|
||||
|
||||
typedef enum {
|
||||
#define OP(mtx) arena_prof_mutex_##mtx,
|
||||
MUTEX_PROF_ARENA_MUTEXES
|
||||
#undef OP
|
||||
mutex_prof_num_arena_mutexes
|
||||
} mutex_prof_arena_ind_t;
|
||||
|
||||
/*
|
||||
 * The fourth parameter is a boolean value that is true for derived rate counters
|
||||
* and false for real ones.
|
||||
*/
|
||||
#define MUTEX_PROF_UINT64_COUNTERS \
|
||||
OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
|
||||
OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
|
||||
OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
|
||||
OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
|
||||
OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
|
||||
OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
|
||||
OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
|
||||
OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
|
||||
OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
|
||||
OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
|
||||
OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
|
||||
|
||||
#define MUTEX_PROF_UINT32_COUNTERS \
|
||||
OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
|
||||
|
||||
#define MUTEX_PROF_COUNTERS \
|
||||
MUTEX_PROF_UINT64_COUNTERS \
|
||||
MUTEX_PROF_UINT32_COUNTERS
|
||||
|
||||
#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
|
||||
|
||||
#define COUNTER_ENUM(counter_list, t) \
|
||||
typedef enum { \
|
||||
counter_list \
|
||||
mutex_prof_num_##t##_counters \
|
||||
} mutex_prof_##t##_counter_ind_t;
|
||||
|
||||
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
|
||||
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
|
||||
|
||||
#undef COUNTER_ENUM
|
||||
#undef OP
|
||||
|
||||
typedef struct {
|
||||
/*
|
||||
* Counters touched on the slow path, i.e. when there is lock
|
||||
* contention. We update them once we have the lock.
|
||||
*/
|
||||
	/* Total time (in nanoseconds) spent waiting on this mutex. */
|
||||
nstime_t tot_wait_time;
|
||||
	/* Max time (in nanoseconds) spent on a single lock operation. */
|
||||
nstime_t max_wait_time;
|
||||
/* # of times have to wait for this mutex (after spinning). */
|
||||
uint64_t n_wait_times;
|
||||
/* # of times acquired the mutex through local spinning. */
|
||||
uint64_t n_spin_acquired;
|
||||
/* Max # of threads waiting for the mutex at the same time. */
|
||||
uint32_t max_n_thds;
|
||||
/* Current # of threads waiting on the lock. Atomic synced. */
|
||||
atomic_u32_t n_waiting_thds;
|
||||
|
||||
/*
|
||||
* Data touched on the fast path. These are modified right after we
|
||||
	 * grab the lock, so they're placed closest to the end (i.e. right before
|
||||
* the lock) so that we have a higher chance of them being on the same
|
||||
* cacheline.
|
||||
*/
|
||||
/* # of times the mutex holder is different than the previous one. */
|
||||
uint64_t n_owner_switches;
|
||||
/* Previous mutex holder, to facilitate n_owner_switches. */
|
||||
tsdn_t *prev_owner;
|
||||
/* # of lock() operations in total. */
|
||||
uint64_t n_lock_ops;
|
||||
} mutex_prof_data_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
|
||||
34
deps/jemalloc/include/jemalloc/internal/nstime.h
vendored
Normal file
34
deps/jemalloc/include/jemalloc/internal/nstime.h
vendored
Normal file
@@ -0,0 +1,34 @@
#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H

/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}

typedef struct {
    uint64_t ns;
} nstime_t;

void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;

#endif /* JEMALLOC_INTERNAL_NSTIME_H */
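A brief usage sketch of this internal timing API (illustrative only; it assumes nothing beyond the declarations above, and the helper name elapsed_ns_sketch is made up):

#include "jemalloc/internal/nstime.h"

/* Hypothetical helper: measure an interval via the nstime_update hook. */
static uint64_t
elapsed_ns_sketch(void) {
    nstime_t start, end;

    nstime_init(&start, 0);
    nstime_update(&start);          /* sample the clock */
    /* ... do some work ... */
    nstime_copy(&end, &start);
    nstime_update(&end);            /* sample the clock again */
    nstime_subtract(&end, &start);  /* end -= start */
    return nstime_ns(&end);         /* interval in nanoseconds */
}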
88
deps/jemalloc/include/jemalloc/internal/pages.h
vendored
Normal file
@@ -0,0 +1,88 @@
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
    (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)

/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
 * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
 *
 * The only supported way to hard-purge on Windows is to decommit and then
 * re-commit, but doing so is racy, and if re-commit fails it's a pain to
 * propagate the "poisoned" memory state.  Since we typically decommit as the
 * next step after purging on Windows anyway, there's no point in adding such
 * complexity.
 */
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
    defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif

static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;

typedef enum {
    thp_mode_default = 0,       /* Do not change hugepage settings. */
    thp_mode_always = 1,        /* Always set MADV_HUGEPAGE. */
    thp_mode_never = 2,         /* Always set MADV_NOHUGEPAGE. */

    thp_mode_names_limit = 3,   /* Used for option processing. */
    thp_mode_not_supported = 3  /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];

void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state (void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
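To make the masking arithmetic above concrete, here is a small worked example that assumes LG_PAGE == 12 (4 KiB pages); the addresses and names are illustrative only:

#include "jemalloc/internal/pages.h"

static void
page_macro_example(void) {
    /* With LG_PAGE == 12: PAGE == 4096 and PAGE_MASK == 0xfff. */
    void *p = (void *)(uintptr_t)0x12345;
    void *base = PAGE_ADDR2BASE(p);   /* 0x12000: low 12 bits cleared */
    size_t sz = PAGE_CEILING(4097);   /* 8192: rounded up to a page multiple */
    (void)base; (void)sz;
}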
391
deps/jemalloc/include/jemalloc/internal/ph.h
vendored
Normal file
@@ -0,0 +1,391 @@
|
||||
/*
|
||||
* A Pairing Heap implementation.
|
||||
*
|
||||
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
|
||||
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
|
||||
*
|
||||
* With auxiliary twopass list, described in a follow on paper.
|
||||
*
|
||||
* "Pairing Heaps: Experiments and Analysis"
|
||||
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
|
||||
*
|
||||
*******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef PH_H_
|
||||
#define PH_H_
|
||||
|
||||
/* Node structure. */
|
||||
#define phn(a_type) \
|
||||
struct { \
|
||||
a_type *phn_prev; \
|
||||
a_type *phn_next; \
|
||||
a_type *phn_lchild; \
|
||||
}
|
||||
|
||||
/* Root structure. */
|
||||
#define ph(a_type) \
|
||||
struct { \
|
||||
a_type *ph_root; \
|
||||
}
|
||||
|
||||
/* Internal utility macros. */
|
||||
#define phn_lchild_get(a_type, a_field, a_phn) \
|
||||
(a_phn->a_field.phn_lchild)
|
||||
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
|
||||
a_phn->a_field.phn_lchild = a_lchild; \
|
||||
} while (0)
|
||||
|
||||
#define phn_next_get(a_type, a_field, a_phn) \
|
||||
(a_phn->a_field.phn_next)
|
||||
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
|
||||
a_phn->a_field.phn_prev = a_prev; \
|
||||
} while (0)
|
||||
|
||||
#define phn_prev_get(a_type, a_field, a_phn) \
|
||||
(a_phn->a_field.phn_prev)
|
||||
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
|
||||
a_phn->a_field.phn_next = a_next; \
|
||||
} while (0)
|
||||
|
||||
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
|
||||
a_type *phn0child; \
|
||||
\
|
||||
assert(a_phn0 != NULL); \
|
||||
assert(a_phn1 != NULL); \
|
||||
assert(a_cmp(a_phn0, a_phn1) <= 0); \
|
||||
\
|
||||
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
|
||||
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
|
||||
phn_next_set(a_type, a_field, a_phn1, phn0child); \
|
||||
if (phn0child != NULL) { \
|
||||
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
|
||||
} \
|
||||
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
|
||||
} while (0)
|
||||
|
||||
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
|
||||
if (a_phn0 == NULL) { \
|
||||
r_phn = a_phn1; \
|
||||
} else if (a_phn1 == NULL) { \
|
||||
r_phn = a_phn0; \
|
||||
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
|
||||
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
|
||||
a_cmp); \
|
||||
r_phn = a_phn0; \
|
||||
} else { \
|
||||
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
|
||||
a_cmp); \
|
||||
r_phn = a_phn1; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
|
||||
a_type *head = NULL; \
|
||||
a_type *tail = NULL; \
|
||||
a_type *phn0 = a_phn; \
|
||||
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
|
||||
\
|
||||
/* \
|
||||
* Multipass merge, wherein the first two elements of a FIFO \
|
||||
* are repeatedly merged, and each result is appended to the \
|
||||
* singly linked FIFO, until the FIFO contains only a single \
|
||||
* element. We start with a sibling list but no reference to \
|
||||
* its tail, so we do a single pass over the sibling list to \
|
||||
* populate the FIFO. \
|
||||
*/ \
|
||||
if (phn1 != NULL) { \
|
||||
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
|
||||
if (phnrest != NULL) { \
|
||||
phn_prev_set(a_type, a_field, phnrest, NULL); \
|
||||
} \
|
||||
phn_prev_set(a_type, a_field, phn0, NULL); \
|
||||
phn_next_set(a_type, a_field, phn0, NULL); \
|
||||
phn_prev_set(a_type, a_field, phn1, NULL); \
|
||||
phn_next_set(a_type, a_field, phn1, NULL); \
|
||||
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
|
||||
head = tail = phn0; \
|
||||
phn0 = phnrest; \
|
||||
while (phn0 != NULL) { \
|
||||
phn1 = phn_next_get(a_type, a_field, phn0); \
|
||||
if (phn1 != NULL) { \
|
||||
phnrest = phn_next_get(a_type, a_field, \
|
||||
phn1); \
|
||||
if (phnrest != NULL) { \
|
||||
phn_prev_set(a_type, a_field, \
|
||||
phnrest, NULL); \
|
||||
} \
|
||||
phn_prev_set(a_type, a_field, phn0, \
|
||||
NULL); \
|
||||
phn_next_set(a_type, a_field, phn0, \
|
||||
NULL); \
|
||||
phn_prev_set(a_type, a_field, phn1, \
|
||||
NULL); \
|
||||
phn_next_set(a_type, a_field, phn1, \
|
||||
NULL); \
|
||||
phn_merge(a_type, a_field, phn0, phn1, \
|
||||
a_cmp, phn0); \
|
||||
phn_next_set(a_type, a_field, tail, \
|
||||
phn0); \
|
||||
tail = phn0; \
|
||||
phn0 = phnrest; \
|
||||
} else { \
|
||||
phn_next_set(a_type, a_field, tail, \
|
||||
phn0); \
|
||||
tail = phn0; \
|
||||
phn0 = NULL; \
|
||||
} \
|
||||
} \
|
||||
phn0 = head; \
|
||||
phn1 = phn_next_get(a_type, a_field, phn0); \
|
||||
if (phn1 != NULL) { \
|
||||
while (true) { \
|
||||
head = phn_next_get(a_type, a_field, \
|
||||
phn1); \
|
||||
assert(phn_prev_get(a_type, a_field, \
|
||||
phn0) == NULL); \
|
||||
phn_next_set(a_type, a_field, phn0, \
|
||||
NULL); \
|
||||
assert(phn_prev_get(a_type, a_field, \
|
||||
phn1) == NULL); \
|
||||
phn_next_set(a_type, a_field, phn1, \
|
||||
NULL); \
|
||||
phn_merge(a_type, a_field, phn0, phn1, \
|
||||
a_cmp, phn0); \
|
||||
if (head == NULL) { \
|
||||
break; \
|
||||
} \
|
||||
phn_next_set(a_type, a_field, tail, \
|
||||
phn0); \
|
||||
tail = phn0; \
|
||||
phn0 = head; \
|
||||
phn1 = phn_next_get(a_type, a_field, \
|
||||
phn0); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
r_phn = phn0; \
|
||||
} while (0)
|
||||
|
||||
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
|
||||
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
|
||||
if (phn != NULL) { \
|
||||
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
|
||||
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
|
||||
phn_prev_set(a_type, a_field, phn, NULL); \
|
||||
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
|
||||
assert(phn_next_get(a_type, a_field, phn) == NULL); \
|
||||
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
|
||||
a_ph->ph_root); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
|
||||
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
|
||||
if (lchild == NULL) { \
|
||||
r_phn = NULL; \
|
||||
} else { \
|
||||
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
|
||||
r_phn); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* The ph_proto() macro generates function prototypes that correspond to the
|
||||
* functions generated by an equivalently parameterized call to ph_gen().
|
||||
*/
|
||||
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
|
||||
a_attr void a_prefix##new(a_ph_type *ph); \
|
||||
a_attr bool a_prefix##empty(a_ph_type *ph); \
|
||||
a_attr a_type *a_prefix##first(a_ph_type *ph); \
|
||||
a_attr a_type *a_prefix##any(a_ph_type *ph); \
|
||||
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
|
||||
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
|
||||
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
|
||||
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
|
||||
|
||||
/*
|
||||
* The ph_gen() macro generates a type-specific pairing heap implementation,
|
||||
* based on the above cpp macros.
|
||||
*/
|
||||
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
|
||||
a_attr void \
|
||||
a_prefix##new(a_ph_type *ph) { \
|
||||
memset(ph, 0, sizeof(ph(a_type))); \
|
||||
} \
|
||||
a_attr bool \
|
||||
a_prefix##empty(a_ph_type *ph) { \
|
||||
return (ph->ph_root == NULL); \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##first(a_ph_type *ph) { \
|
||||
if (ph->ph_root == NULL) { \
|
||||
return NULL; \
|
||||
} \
|
||||
ph_merge_aux(a_type, a_field, ph, a_cmp); \
|
||||
return ph->ph_root; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##any(a_ph_type *ph) { \
|
||||
if (ph->ph_root == NULL) { \
|
||||
return NULL; \
|
||||
} \
|
||||
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
|
||||
if (aux != NULL) { \
|
||||
return aux; \
|
||||
} \
|
||||
return ph->ph_root; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
|
||||
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
|
||||
\
|
||||
/* \
|
||||
* Treat the root as an aux list during insertion, and lazily \
|
||||
* merge during a_prefix##remove_first(). For elements that \
|
||||
* are inserted, then removed via a_prefix##remove() before the \
|
||||
* aux list is ever processed, this makes insert/remove \
|
||||
* constant-time, whereas eager merging would make insert \
|
||||
* O(log n). \
|
||||
*/ \
|
||||
if (ph->ph_root == NULL) { \
|
||||
ph->ph_root = phn; \
|
||||
} else { \
|
||||
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
|
||||
a_field, ph->ph_root)); \
|
||||
if (phn_next_get(a_type, a_field, ph->ph_root) != \
|
||||
NULL) { \
|
||||
phn_prev_set(a_type, a_field, \
|
||||
phn_next_get(a_type, a_field, ph->ph_root), \
|
||||
phn); \
|
||||
} \
|
||||
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
|
||||
phn_next_set(a_type, a_field, ph->ph_root, phn); \
|
||||
} \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##remove_first(a_ph_type *ph) { \
|
||||
a_type *ret; \
|
||||
\
|
||||
if (ph->ph_root == NULL) { \
|
||||
return NULL; \
|
||||
} \
|
||||
ph_merge_aux(a_type, a_field, ph, a_cmp); \
|
||||
\
|
||||
ret = ph->ph_root; \
|
||||
\
|
||||
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
|
||||
ph->ph_root); \
|
||||
\
|
||||
return ret; \
|
||||
} \
|
||||
a_attr a_type * \
|
||||
a_prefix##remove_any(a_ph_type *ph) { \
|
||||
/* \
|
||||
* Remove the most recently inserted aux list element, or the \
|
||||
* root if the aux list is empty. This has the effect of \
|
||||
* behaving as a LIFO (and insertion/removal is therefore \
|
||||
* constant-time) if a_prefix##[remove_]first() are never \
|
||||
* called. \
|
||||
*/ \
|
||||
if (ph->ph_root == NULL) { \
|
||||
return NULL; \
|
||||
} \
|
||||
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
|
||||
if (ret != NULL) { \
|
||||
a_type *aux = phn_next_get(a_type, a_field, ret); \
|
||||
phn_next_set(a_type, a_field, ph->ph_root, aux); \
|
||||
if (aux != NULL) { \
|
||||
phn_prev_set(a_type, a_field, aux, \
|
||||
ph->ph_root); \
|
||||
} \
|
||||
return ret; \
|
||||
} \
|
||||
ret = ph->ph_root; \
|
||||
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
|
||||
ph->ph_root); \
|
||||
return ret; \
|
||||
} \
|
||||
a_attr void \
|
||||
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
|
||||
a_type *replace, *parent; \
|
||||
\
|
||||
if (ph->ph_root == phn) { \
|
||||
/* \
|
||||
* We can delete from aux list without merging it, but \
|
||||
* we need to merge if we are dealing with the root \
|
||||
* node and it has children. \
|
||||
*/ \
|
||||
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
|
||||
ph->ph_root = phn_next_get(a_type, a_field, \
|
||||
phn); \
|
||||
if (ph->ph_root != NULL) { \
|
||||
phn_prev_set(a_type, a_field, \
|
||||
ph->ph_root, NULL); \
|
||||
} \
|
||||
return; \
|
||||
} \
|
||||
ph_merge_aux(a_type, a_field, ph, a_cmp); \
|
||||
if (ph->ph_root == phn) { \
|
||||
ph_merge_children(a_type, a_field, ph->ph_root, \
|
||||
a_cmp, ph->ph_root); \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
/* Get parent (if phn is leftmost child) before mutating. */ \
|
||||
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
|
||||
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
|
||||
parent = NULL; \
|
||||
} \
|
||||
} \
|
||||
/* Find a possible replacement node, and link to parent. */ \
|
||||
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
|
||||
/* Set next/prev for sibling linked list. */ \
|
||||
if (replace != NULL) { \
|
||||
if (parent != NULL) { \
|
||||
phn_prev_set(a_type, a_field, replace, parent); \
|
||||
phn_lchild_set(a_type, a_field, parent, \
|
||||
replace); \
|
||||
} else { \
|
||||
phn_prev_set(a_type, a_field, replace, \
|
||||
phn_prev_get(a_type, a_field, phn)); \
|
||||
if (phn_prev_get(a_type, a_field, phn) != \
|
||||
NULL) { \
|
||||
phn_next_set(a_type, a_field, \
|
||||
phn_prev_get(a_type, a_field, phn), \
|
||||
replace); \
|
||||
} \
|
||||
} \
|
||||
phn_next_set(a_type, a_field, replace, \
|
||||
phn_next_get(a_type, a_field, phn)); \
|
||||
if (phn_next_get(a_type, a_field, phn) != NULL) { \
|
||||
phn_prev_set(a_type, a_field, \
|
||||
phn_next_get(a_type, a_field, phn), \
|
||||
replace); \
|
||||
} \
|
||||
} else { \
|
||||
if (parent != NULL) { \
|
||||
a_type *next = phn_next_get(a_type, a_field, \
|
||||
phn); \
|
||||
phn_lchild_set(a_type, a_field, parent, next); \
|
||||
if (next != NULL) { \
|
||||
phn_prev_set(a_type, a_field, next, \
|
||||
parent); \
|
||||
} \
|
||||
} else { \
|
||||
assert(phn_prev_get(a_type, a_field, phn) != \
|
||||
NULL); \
|
||||
phn_next_set(a_type, a_field, \
|
||||
phn_prev_get(a_type, a_field, phn), \
|
||||
phn_next_get(a_type, a_field, phn)); \
|
||||
} \
|
||||
if (phn_next_get(a_type, a_field, phn) != NULL) { \
|
||||
phn_prev_set(a_type, a_field, \
|
||||
phn_next_get(a_type, a_field, phn), \
|
||||
phn_prev_get(a_type, a_field, phn)); \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
#endif /* PH_H_ */
|
||||
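A sketch of how a concrete heap would be instantiated with phn()/ph()/ph_gen(); node_t, node_heap_t, and node_cmp are hypothetical names used only for illustration, not part of jemalloc:

/* Hypothetical element type; phn() supplies the intrusive heap linkage. */
typedef struct node_s node_t;
struct node_s {
    uint64_t key;
    phn(node_t) link;
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

/*
 * Generates node_heap_new(), node_heap_empty(), node_heap_first(),
 * node_heap_insert(), node_heap_remove_first(), node_heap_remove(), etc.
 */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)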
422
deps/jemalloc/include/jemalloc/internal/private_namespace.h
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
#define a0dalloc JEMALLOC_N(a0dalloc)
|
||||
#define a0malloc JEMALLOC_N(a0malloc)
|
||||
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
|
||||
#define arena_cleanup JEMALLOC_N(arena_cleanup)
|
||||
#define arena_init JEMALLOC_N(arena_init)
|
||||
#define arena_migrate JEMALLOC_N(arena_migrate)
|
||||
#define arenas JEMALLOC_N(arenas)
|
||||
#define arena_set JEMALLOC_N(arena_set)
|
||||
#define arenas_lock JEMALLOC_N(arenas_lock)
|
||||
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
|
||||
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
|
||||
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
|
||||
#define bootstrap_free JEMALLOC_N(bootstrap_free)
|
||||
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
|
||||
#define free_default JEMALLOC_N(free_default)
|
||||
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
|
||||
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
|
||||
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
|
||||
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
|
||||
#define je_sdallocx_noflags JEMALLOC_N(je_sdallocx_noflags)
|
||||
#define malloc_default JEMALLOC_N(malloc_default)
|
||||
#define malloc_initialized JEMALLOC_N(malloc_initialized)
|
||||
#define malloc_slow JEMALLOC_N(malloc_slow)
|
||||
#define manual_arena_base JEMALLOC_N(manual_arena_base)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
#define opt_abort_conf JEMALLOC_N(opt_abort_conf)
|
||||
#define opt_confirm_conf JEMALLOC_N(opt_confirm_conf)
|
||||
#define opt_junk JEMALLOC_N(opt_junk)
|
||||
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
|
||||
#define opt_junk_free JEMALLOC_N(opt_junk_free)
|
||||
#define opt_narenas JEMALLOC_N(opt_narenas)
|
||||
#define opt_utrace JEMALLOC_N(opt_utrace)
|
||||
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
|
||||
#define opt_zero JEMALLOC_N(opt_zero)
|
||||
#define sdallocx_default JEMALLOC_N(sdallocx_default)
|
||||
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
|
||||
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
|
||||
#define arena_bin_choose_lock JEMALLOC_N(arena_bin_choose_lock)
|
||||
#define arena_boot JEMALLOC_N(arena_boot)
|
||||
#define arena_choose_huge JEMALLOC_N(arena_choose_huge)
|
||||
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
|
||||
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
|
||||
#define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
|
||||
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
|
||||
#define arena_decay JEMALLOC_N(arena_decay)
|
||||
#define arena_destroy JEMALLOC_N(arena_destroy)
|
||||
#define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get)
|
||||
#define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set)
|
||||
#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get)
|
||||
#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set)
|
||||
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
|
||||
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
|
||||
#define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
|
||||
#define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep)
|
||||
#define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
|
||||
#define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
|
||||
#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc)
|
||||
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
|
||||
#define arena_init_huge JEMALLOC_N(arena_init_huge)
|
||||
#define arena_is_huge JEMALLOC_N(arena_is_huge)
|
||||
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
|
||||
#define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get)
|
||||
#define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set)
|
||||
#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get)
|
||||
#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set)
|
||||
#define arena_new JEMALLOC_N(arena_new)
|
||||
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
|
||||
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
|
||||
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
|
||||
#define arena_palloc JEMALLOC_N(arena_palloc)
|
||||
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
|
||||
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
|
||||
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
|
||||
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
|
||||
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
|
||||
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
|
||||
#define arena_prefork4 JEMALLOC_N(arena_prefork4)
|
||||
#define arena_prefork5 JEMALLOC_N(arena_prefork5)
|
||||
#define arena_prefork6 JEMALLOC_N(arena_prefork6)
|
||||
#define arena_prefork7 JEMALLOC_N(arena_prefork7)
|
||||
#define arena_prof_promote JEMALLOC_N(arena_prof_promote)
|
||||
#define arena_ralloc JEMALLOC_N(arena_ralloc)
|
||||
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
|
||||
#define arena_reset JEMALLOC_N(arena_reset)
|
||||
#define arena_retain_grow_limit_get_set JEMALLOC_N(arena_retain_grow_limit_get_set)
|
||||
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
|
||||
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
|
||||
#define h_steps JEMALLOC_N(h_steps)
|
||||
#define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms)
|
||||
#define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms)
|
||||
#define opt_oversize_threshold JEMALLOC_N(opt_oversize_threshold)
|
||||
#define opt_percpu_arena JEMALLOC_N(opt_percpu_arena)
|
||||
#define oversize_threshold JEMALLOC_N(oversize_threshold)
|
||||
#define percpu_arena_mode_names JEMALLOC_N(percpu_arena_mode_names)
|
||||
#define background_thread_boot0 JEMALLOC_N(background_thread_boot0)
|
||||
#define background_thread_boot1 JEMALLOC_N(background_thread_boot1)
|
||||
#define background_thread_create JEMALLOC_N(background_thread_create)
|
||||
#define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init)
|
||||
#define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state)
|
||||
#define background_thread_info JEMALLOC_N(background_thread_info)
|
||||
#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check)
|
||||
#define background_thread_lock JEMALLOC_N(background_thread_lock)
|
||||
#define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child)
|
||||
#define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent)
|
||||
#define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0)
|
||||
#define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1)
|
||||
#define background_threads_disable JEMALLOC_N(background_threads_disable)
|
||||
#define background_threads_enable JEMALLOC_N(background_threads_enable)
|
||||
#define background_thread_stats_read JEMALLOC_N(background_thread_stats_read)
|
||||
#define max_background_threads JEMALLOC_N(max_background_threads)
|
||||
#define n_background_threads JEMALLOC_N(n_background_threads)
|
||||
#define opt_background_thread JEMALLOC_N(opt_background_thread)
|
||||
#define opt_max_background_threads JEMALLOC_N(opt_max_background_threads)
|
||||
#define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper)
|
||||
#define b0get JEMALLOC_N(b0get)
|
||||
#define base_alloc JEMALLOC_N(base_alloc)
|
||||
#define base_alloc_extent JEMALLOC_N(base_alloc_extent)
|
||||
#define base_boot JEMALLOC_N(base_boot)
|
||||
#define base_delete JEMALLOC_N(base_delete)
|
||||
#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
|
||||
#define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
|
||||
#define base_new JEMALLOC_N(base_new)
|
||||
#define base_postfork_child JEMALLOC_N(base_postfork_child)
|
||||
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
|
||||
#define base_prefork JEMALLOC_N(base_prefork)
|
||||
#define base_stats_get JEMALLOC_N(base_stats_get)
|
||||
#define metadata_thp_mode_names JEMALLOC_N(metadata_thp_mode_names)
|
||||
#define opt_metadata_thp JEMALLOC_N(opt_metadata_thp)
|
||||
#define bin_boot JEMALLOC_N(bin_boot)
|
||||
#define bin_infos JEMALLOC_N(bin_infos)
|
||||
#define bin_init JEMALLOC_N(bin_init)
|
||||
#define bin_postfork_child JEMALLOC_N(bin_postfork_child)
|
||||
#define bin_postfork_parent JEMALLOC_N(bin_postfork_parent)
|
||||
#define bin_prefork JEMALLOC_N(bin_prefork)
|
||||
#define bin_shard_sizes_boot JEMALLOC_N(bin_shard_sizes_boot)
|
||||
#define bin_update_shard_size JEMALLOC_N(bin_update_shard_size)
|
||||
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
|
||||
#define bitmap_init JEMALLOC_N(bitmap_init)
|
||||
#define bitmap_size JEMALLOC_N(bitmap_size)
|
||||
#define ckh_count JEMALLOC_N(ckh_count)
|
||||
#define ckh_delete JEMALLOC_N(ckh_delete)
|
||||
#define ckh_insert JEMALLOC_N(ckh_insert)
|
||||
#define ckh_iter JEMALLOC_N(ckh_iter)
|
||||
#define ckh_new JEMALLOC_N(ckh_new)
|
||||
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
|
||||
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
|
||||
#define ckh_remove JEMALLOC_N(ckh_remove)
|
||||
#define ckh_search JEMALLOC_N(ckh_search)
|
||||
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
|
||||
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
|
||||
#define ctl_boot JEMALLOC_N(ctl_boot)
|
||||
#define ctl_bymib JEMALLOC_N(ctl_bymib)
|
||||
#define ctl_byname JEMALLOC_N(ctl_byname)
|
||||
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
|
||||
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
|
||||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define div_init JEMALLOC_N(div_init)
|
||||
#define extent_alloc JEMALLOC_N(extent_alloc)
|
||||
#define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
|
||||
#define extent_avail_any JEMALLOC_N(extent_avail_any)
|
||||
#define extent_avail_empty JEMALLOC_N(extent_avail_empty)
|
||||
#define extent_avail_first JEMALLOC_N(extent_avail_first)
|
||||
#define extent_avail_insert JEMALLOC_N(extent_avail_insert)
|
||||
#define extent_avail_new JEMALLOC_N(extent_avail_new)
|
||||
#define extent_avail_remove JEMALLOC_N(extent_avail_remove)
|
||||
#define extent_avail_remove_any JEMALLOC_N(extent_avail_remove_any)
|
||||
#define extent_avail_remove_first JEMALLOC_N(extent_avail_remove_first)
|
||||
#define extent_boot JEMALLOC_N(extent_boot)
|
||||
#define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
|
||||
#define extent_dalloc JEMALLOC_N(extent_dalloc)
|
||||
#define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
|
||||
#define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
|
||||
#define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
|
||||
#define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper)
|
||||
#define extent_heap_any JEMALLOC_N(extent_heap_any)
|
||||
#define extent_heap_empty JEMALLOC_N(extent_heap_empty)
|
||||
#define extent_heap_first JEMALLOC_N(extent_heap_first)
|
||||
#define extent_heap_insert JEMALLOC_N(extent_heap_insert)
|
||||
#define extent_heap_new JEMALLOC_N(extent_heap_new)
|
||||
#define extent_heap_remove JEMALLOC_N(extent_heap_remove)
|
||||
#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any)
|
||||
#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
|
||||
#define extent_hooks_default JEMALLOC_N(extent_hooks_default)
|
||||
#define extent_hooks_get JEMALLOC_N(extent_hooks_get)
|
||||
#define extent_hooks_set JEMALLOC_N(extent_hooks_set)
|
||||
#define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
|
||||
#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool)
|
||||
#define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
|
||||
#define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
|
||||
#define extents_alloc JEMALLOC_N(extents_alloc)
|
||||
#define extents_dalloc JEMALLOC_N(extents_dalloc)
|
||||
#define extents_evict JEMALLOC_N(extents_evict)
|
||||
#define extents_init JEMALLOC_N(extents_init)
|
||||
#define extents_nbytes_get JEMALLOC_N(extents_nbytes_get)
|
||||
#define extents_nextents_get JEMALLOC_N(extents_nextents_get)
|
||||
#define extents_npages_get JEMALLOC_N(extents_npages_get)
|
||||
#define extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
|
||||
#define extents_postfork_child JEMALLOC_N(extents_postfork_child)
|
||||
#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent)
|
||||
#define extents_prefork JEMALLOC_N(extents_prefork)
|
||||
#define extents_rtree JEMALLOC_N(extents_rtree)
|
||||
#define extents_state_get JEMALLOC_N(extents_state_get)
|
||||
#define extent_util_stats_get JEMALLOC_N(extent_util_stats_get)
|
||||
#define extent_util_stats_verbose_get JEMALLOC_N(extent_util_stats_verbose_get)
|
||||
#define opt_lg_extent_max_active_fit JEMALLOC_N(opt_lg_extent_max_active_fit)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
|
||||
#define extent_dss_boot JEMALLOC_N(extent_dss_boot)
|
||||
#define extent_dss_mergeable JEMALLOC_N(extent_dss_mergeable)
|
||||
#define extent_dss_prec_get JEMALLOC_N(extent_dss_prec_get)
|
||||
#define extent_dss_prec_set JEMALLOC_N(extent_dss_prec_set)
|
||||
#define extent_in_dss JEMALLOC_N(extent_in_dss)
|
||||
#define opt_dss JEMALLOC_N(opt_dss)
|
||||
#define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
|
||||
#define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
|
||||
#define opt_retain JEMALLOC_N(opt_retain)
|
||||
#define hook_boot JEMALLOC_N(hook_boot)
|
||||
#define hook_install JEMALLOC_N(hook_install)
|
||||
#define hook_invoke_alloc JEMALLOC_N(hook_invoke_alloc)
|
||||
#define hook_invoke_dalloc JEMALLOC_N(hook_invoke_dalloc)
|
||||
#define hook_invoke_expand JEMALLOC_N(hook_invoke_expand)
|
||||
#define hook_remove JEMALLOC_N(hook_remove)
|
||||
#define large_dalloc JEMALLOC_N(large_dalloc)
|
||||
#define large_dalloc_finish JEMALLOC_N(large_dalloc_finish)
|
||||
#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
|
||||
#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
|
||||
#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked)
|
||||
#define large_malloc JEMALLOC_N(large_malloc)
|
||||
#define large_palloc JEMALLOC_N(large_palloc)
|
||||
#define large_prof_alloc_time_get JEMALLOC_N(large_prof_alloc_time_get)
|
||||
#define large_prof_alloc_time_set JEMALLOC_N(large_prof_alloc_time_set)
|
||||
#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
|
||||
#define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
|
||||
#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
|
||||
#define large_ralloc JEMALLOC_N(large_ralloc)
|
||||
#define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
|
||||
#define large_salloc JEMALLOC_N(large_salloc)
|
||||
#define log_init_done JEMALLOC_N(log_init_done)
|
||||
#define log_var_names JEMALLOC_N(log_var_names)
|
||||
#define log_var_update_state JEMALLOC_N(log_var_update_state)
|
||||
#define buferror JEMALLOC_N(buferror)
|
||||
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
|
||||
#define malloc_printf JEMALLOC_N(malloc_printf)
|
||||
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
|
||||
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
|
||||
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
|
||||
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
|
||||
#define malloc_write JEMALLOC_N(malloc_write)
|
||||
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
|
||||
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
|
||||
#define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow)
|
||||
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
|
||||
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
|
||||
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
|
||||
#define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset)
|
||||
#define mutex_pool_init JEMALLOC_N(mutex_pool_init)
|
||||
#define nstime_add JEMALLOC_N(nstime_add)
|
||||
#define nstime_compare JEMALLOC_N(nstime_compare)
|
||||
#define nstime_copy JEMALLOC_N(nstime_copy)
|
||||
#define nstime_divide JEMALLOC_N(nstime_divide)
|
||||
#define nstime_iadd JEMALLOC_N(nstime_iadd)
|
||||
#define nstime_idivide JEMALLOC_N(nstime_idivide)
|
||||
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
|
||||
#define nstime_init JEMALLOC_N(nstime_init)
|
||||
#define nstime_init2 JEMALLOC_N(nstime_init2)
|
||||
#define nstime_isubtract JEMALLOC_N(nstime_isubtract)
|
||||
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
|
||||
#define nstime_msec JEMALLOC_N(nstime_msec)
|
||||
#define nstime_ns JEMALLOC_N(nstime_ns)
|
||||
#define nstime_nsec JEMALLOC_N(nstime_nsec)
|
||||
#define nstime_sec JEMALLOC_N(nstime_sec)
|
||||
#define nstime_subtract JEMALLOC_N(nstime_subtract)
|
||||
#define nstime_update JEMALLOC_N(nstime_update)
|
||||
#define init_system_thp_mode JEMALLOC_N(init_system_thp_mode)
|
||||
#define opt_thp JEMALLOC_N(opt_thp)
|
||||
#define pages_boot JEMALLOC_N(pages_boot)
|
||||
#define pages_commit JEMALLOC_N(pages_commit)
|
||||
#define pages_decommit JEMALLOC_N(pages_decommit)
|
||||
#define pages_dodump JEMALLOC_N(pages_dodump)
|
||||
#define pages_dontdump JEMALLOC_N(pages_dontdump)
|
||||
#define pages_huge JEMALLOC_N(pages_huge)
|
||||
#define pages_map JEMALLOC_N(pages_map)
|
||||
#define pages_nohuge JEMALLOC_N(pages_nohuge)
|
||||
#define pages_purge_forced JEMALLOC_N(pages_purge_forced)
|
||||
#define pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
|
||||
#define pages_set_thp_state JEMALLOC_N(pages_set_thp_state)
|
||||
#define pages_unmap JEMALLOC_N(pages_unmap)
|
||||
#define thp_mode_names JEMALLOC_N(thp_mode_names)
|
||||
#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
|
||||
#define bt_init JEMALLOC_N(bt_init)
|
||||
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
|
||||
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
|
||||
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
|
||||
#define opt_prof JEMALLOC_N(opt_prof)
|
||||
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
|
||||
#define opt_prof_active JEMALLOC_N(opt_prof_active)
|
||||
#define opt_prof_final JEMALLOC_N(opt_prof_final)
|
||||
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
|
||||
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
|
||||
#define opt_prof_log JEMALLOC_N(opt_prof_log)
|
||||
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
|
||||
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
|
||||
#define prof_accum_init JEMALLOC_N(prof_accum_init)
|
||||
#define prof_active JEMALLOC_N(prof_active)
|
||||
#define prof_active_get JEMALLOC_N(prof_active_get)
|
||||
#define prof_active_set JEMALLOC_N(prof_active_set)
|
||||
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
|
||||
#define prof_backtrace JEMALLOC_N(prof_backtrace)
|
||||
#define prof_boot0 JEMALLOC_N(prof_boot0)
|
||||
#define prof_boot1 JEMALLOC_N(prof_boot1)
|
||||
#define prof_boot2 JEMALLOC_N(prof_boot2)
|
||||
#define prof_dump_header JEMALLOC_N(prof_dump_header)
|
||||
#define prof_dump_open JEMALLOC_N(prof_dump_open)
|
||||
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
|
||||
#define prof_gdump JEMALLOC_N(prof_gdump)
|
||||
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
|
||||
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
|
||||
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
|
||||
#define prof_idump JEMALLOC_N(prof_idump)
|
||||
#define prof_interval JEMALLOC_N(prof_interval)
|
||||
#define prof_logging_state JEMALLOC_N(prof_logging_state)
|
||||
#define prof_log_start JEMALLOC_N(prof_log_start)
|
||||
#define prof_log_stop JEMALLOC_N(prof_log_stop)
|
||||
#define prof_lookup JEMALLOC_N(prof_lookup)
|
||||
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
|
||||
#define prof_mdump JEMALLOC_N(prof_mdump)
|
||||
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
|
||||
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
|
||||
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
|
||||
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
|
||||
#define prof_reset JEMALLOC_N(prof_reset)
|
||||
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
|
||||
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
|
||||
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
|
||||
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
|
||||
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
|
||||
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
|
||||
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
|
||||
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
|
||||
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
|
||||
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
|
||||
#define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init)
|
||||
#define rtree_leaf_alloc JEMALLOC_N(rtree_leaf_alloc)
|
||||
#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc)
|
||||
#define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard)
|
||||
#define rtree_new JEMALLOC_N(rtree_new)
|
||||
#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
|
||||
#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
|
||||
#define safety_check_fail JEMALLOC_N(safety_check_fail)
|
||||
#define safety_check_set_abort JEMALLOC_N(safety_check_set_abort)
|
||||
#define arena_mutex_names JEMALLOC_N(arena_mutex_names)
|
||||
#define global_mutex_names JEMALLOC_N(global_mutex_names)
|
||||
#define opt_stats_print JEMALLOC_N(opt_stats_print)
|
||||
#define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts)
|
||||
#define stats_print JEMALLOC_N(stats_print)
|
||||
#define sc_boot JEMALLOC_N(sc_boot)
|
||||
#define sc_data_global JEMALLOC_N(sc_data_global)
|
||||
#define sc_data_init JEMALLOC_N(sc_data_init)
|
||||
#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size)
|
||||
#define sz_boot JEMALLOC_N(sz_boot)
|
||||
#define sz_index2size_tab JEMALLOC_N(sz_index2size_tab)
|
||||
#define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab)
|
||||
#define sz_size2index_tab JEMALLOC_N(sz_size2index_tab)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
|
||||
#define opt_tcache JEMALLOC_N(opt_tcache)
|
||||
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
|
||||
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
|
||||
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
|
||||
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
|
||||
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
|
||||
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
|
||||
#define tcache_boot JEMALLOC_N(tcache_boot)
|
||||
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
|
||||
#define tcache_create_explicit JEMALLOC_N(tcache_create_explicit)
|
||||
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
|
||||
#define tcache_flush JEMALLOC_N(tcache_flush)
|
||||
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
|
||||
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
|
||||
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
|
||||
#define tcache_prefork JEMALLOC_N(tcache_prefork)
|
||||
#define tcaches JEMALLOC_N(tcaches)
|
||||
#define tcache_salloc JEMALLOC_N(tcache_salloc)
|
||||
#define tcaches_create JEMALLOC_N(tcaches_create)
|
||||
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
|
||||
#define tcaches_flush JEMALLOC_N(tcaches_flush)
|
||||
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
|
||||
#define tsd_tcache_data_init JEMALLOC_N(tsd_tcache_data_init)
|
||||
#define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init)
|
||||
#define test_hooks_arena_new_hook JEMALLOC_N(test_hooks_arena_new_hook)
|
||||
#define test_hooks_libc_hook JEMALLOC_N(test_hooks_libc_hook)
|
||||
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
|
||||
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
|
||||
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
|
||||
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
|
||||
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
|
||||
#define tsd_booted JEMALLOC_N(tsd_booted)
|
||||
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
|
||||
#define tsd_fetch_slow JEMALLOC_N(tsd_fetch_slow)
|
||||
#define tsd_global_slow JEMALLOC_N(tsd_global_slow)
|
||||
#define tsd_global_slow_dec JEMALLOC_N(tsd_global_slow_dec)
|
||||
#define tsd_global_slow_inc JEMALLOC_N(tsd_global_slow_inc)
|
||||
#define tsd_postfork_child JEMALLOC_N(tsd_postfork_child)
|
||||
#define tsd_postfork_parent JEMALLOC_N(tsd_postfork_parent)
|
||||
#define tsd_prefork JEMALLOC_N(tsd_prefork)
|
||||
#define tsd_slow_update JEMALLOC_N(tsd_slow_update)
|
||||
#define tsd_state_set JEMALLOC_N(tsd_state_set)
|
||||
#define tsd_tls JEMALLOC_N(tsd_tls)
|
||||
#define tsd_tsd JEMALLOC_N(tsd_tsd)
|
||||
#define witness_depth_error JEMALLOC_N(witness_depth_error)
|
||||
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
|
||||
#define witness_init JEMALLOC_N(witness_init)
|
||||
#define witness_lock_error JEMALLOC_N(witness_lock_error)
|
||||
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
|
||||
#define witness_owner_error JEMALLOC_N(witness_owner_error)
|
||||
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
|
||||
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
|
||||
#define witness_prefork JEMALLOC_N(witness_prefork)
|
||||
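Each entry above routes an internal symbol through JEMALLOC_N() so that it lands in jemalloc's private namespace. The exact definition of JEMALLOC_N() is generated by the configure step; assuming the default prefix, the effect is roughly:

/* Assumed default; the real definition is emitted by the build system. */
#define JEMALLOC_N(n) je_##n
/* e.g. the arena_boot entry above then expands to je_arena_boot. */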
185
deps/jemalloc/include/jemalloc/internal/prng.h
vendored
Normal file
@@ -0,0 +1,185 @@
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"

/*
 * Simple linear congruential pseudo-random number generator:
 *
 *   prng(y) = (a*x + c) % m
 *
 * where the following constants ensure maximal period:
 *
 *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
 *   c == Odd number (relatively prime to 2^n).
 *   m == 2^32
 *
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
 * bits.
 */

/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)

#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
    return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
    return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
    return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
    return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}

/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range).  If atomic is true, they do so safely from multiple threads.
 * Multithreaded 64-bit prngs aren't supported.
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
    uint32_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= 32);

    state0 = atomic_load_u32(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_u32(state0);
        } while (!atomic_compare_exchange_weak_u32(state, &state0,
            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_u32(state0);
        atomic_store_u32(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> (32 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    uint64_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= 64);

    state1 = prng_state_next_u64(*state);
    *state = state1;
    ret = state1 >> (64 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
    size_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    state0 = atomic_load_zu(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_zu(state0);
        } while (!atomic_compare_exchange_weak_zu(state, &state0,
            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_zu(state0);
        atomic_store_zu(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return ret;
}

/*
 * The prng_range functions behave like the prng_lg_range, but return a result
 * in [0, range) instead of [0, 2**lg_range).
 */

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
    uint32_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u32(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
    uint64_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
    size_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_zu(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}

#endif /* JEMALLOC_INTERNAL_PRNG_H */
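A short usage sketch of the bounded generators above (illustrative only; the seed is arbitrary and the variable names are not from jemalloc):

static uint64_t
prng_example(void) {
    uint64_t state = 42;                         /* arbitrary seed */
    uint64_t r1 = prng_range_u64(&state, 100);   /* uniform in [0, 100) */
    uint64_t r2 = prng_lg_range_u64(&state, 8);  /* uniform in [0, 256) */
    return r1 ^ r2;
}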
613
deps/jemalloc/include/jemalloc/internal/prof.h
vendored
Normal file
@@ -0,0 +1,613 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct prof_bt_s prof_bt_t;
|
||||
typedef struct prof_cnt_s prof_cnt_t;
|
||||
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
|
||||
typedef struct prof_ctx_s prof_ctx_t;
|
||||
typedef struct prof_tdata_s prof_tdata_t;
|
||||
|
||||
/* Option defaults. */
|
||||
#ifdef JEMALLOC_PROF
|
||||
# define PROF_PREFIX_DEFAULT "jeprof"
|
||||
#else
|
||||
# define PROF_PREFIX_DEFAULT ""
|
||||
#endif
|
||||
#define LG_PROF_SAMPLE_DEFAULT 19
|
||||
#define LG_PROF_INTERVAL_DEFAULT -1
|
||||
|
||||
/*
|
||||
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
|
||||
* is based on __builtin_return_address() necessarily has a hard-coded number
|
||||
* of backtrace frame handlers, and should be kept in sync with this setting.
|
||||
*/
|
||||
#define PROF_BT_MAX 128
|
||||
|
||||
/* Maximum number of backtraces to store in each per thread LRU cache. */
|
||||
#define PROF_TCMAX 1024
|
||||
|
||||
/* Initial hash table size. */
|
||||
#define PROF_CKH_MINITEMS 64
|
||||
|
||||
/* Size of memory buffer to use when writing dump files. */
|
||||
#define PROF_DUMP_BUFSIZE 65536
|
||||
|
||||
/* Size of stack-allocated buffer used by prof_printf(). */
|
||||
#define PROF_PRINTF_BUFSIZE 128
|
||||
|
||||
/*
|
||||
* Number of mutexes shared among all ctx's. No space is allocated for these
|
||||
* unless profiling is enabled, so it's okay to over-provision.
|
||||
*/
|
||||
#define PROF_NCTX_LOCKS 1024
|
||||
|
||||
/*
|
||||
* prof_tdata pointers close to NULL are used to encode state information that
|
||||
* is used for cleaning up during thread shutdown.
|
||||
*/
|
||||
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
|
||||
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
|
||||
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct prof_bt_s {
|
||||
/* Backtrace, stored as len program counters. */
|
||||
void **vec;
|
||||
unsigned len;
|
||||
};
|
||||
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
|
||||
typedef struct {
|
||||
prof_bt_t *bt;
|
||||
unsigned nignore;
|
||||
unsigned max;
|
||||
} prof_unwind_data_t;
|
||||
#endif
|
||||
|
||||
struct prof_cnt_s {
|
||||
/*
|
||||
* Profiling counters. An allocation/deallocation pair can operate on
|
||||
* different prof_thr_cnt_t objects that are linked into the same
|
||||
* prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
|
||||
* negative. In principle it is possible for the *bytes counters to
|
||||
* overflow/underflow, but a general solution would require something
|
||||
* like 128-bit counters; this implementation doesn't bother to solve
|
||||
* that problem.
|
||||
*/
|
||||
int64_t curobjs;
|
||||
int64_t curbytes;
|
||||
uint64_t accumobjs;
|
||||
uint64_t accumbytes;
|
||||
};
|
||||
|
||||
struct prof_thr_cnt_s {
|
||||
/* Linkage into prof_ctx_t's cnts_ql. */
|
||||
ql_elm(prof_thr_cnt_t) cnts_link;
|
||||
|
||||
/* Linkage into thread's LRU. */
|
||||
ql_elm(prof_thr_cnt_t) lru_link;
|
||||
|
||||
/*
|
||||
* Associated context. If a thread frees an object that it did not
|
||||
* allocate, it is possible that the context is not cached in the
|
||||
* thread's hash table, in which case it must be able to look up the
|
||||
* context, insert a new prof_thr_cnt_t into the thread's hash table,
|
||||
* and link it into the prof_ctx_t's cnts_ql.
|
||||
*/
|
||||
prof_ctx_t *ctx;
|
||||
|
||||
/*
|
||||
* Threads use memory barriers to update the counters. Since there is
|
||||
* only ever one writer, the only challenge is for the reader to get a
|
||||
* consistent read of the counters.
|
||||
*
|
||||
* The writer uses this series of operations:
|
||||
*
|
||||
* 1) Increment epoch to an odd number.
|
||||
* 2) Update counters.
|
||||
* 3) Increment epoch to an even number.
|
||||
*
|
||||
* The reader must assure 1) that the epoch is even while it reads the
|
||||
* counters, and 2) that the epoch doesn't change between the time it
|
||||
* starts and finishes reading the counters.
|
||||
*/
|
||||
unsigned epoch;
|
||||
|
||||
/* Profiling counters. */
|
||||
prof_cnt_t cnts;
|
||||
};
|
||||
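/*
 * The epoch scheme described above is a seqlock-style protocol.  A hedged
 * sketch of the reader side (not jemalloc code; it elides the memory
 * barriers the comment refers to):
 *
 *	prof_cnt_t
 *	read_cnts_sketch(const prof_thr_cnt_t *thr_cnt) {
 *		prof_cnt_t snap;
 *		unsigned e0, e1;
 *		do {
 *			e0 = thr_cnt->epoch;	// must be even ...
 *			snap = thr_cnt->cnts;	// copy the counters
 *			e1 = thr_cnt->epoch;	// ... and unchanged afterwards
 *		} while ((e0 & 1) != 0 || e0 != e1);
 *		return snap;
 *	}
 */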
|
||||
struct prof_ctx_s {
|
||||
/* Associated backtrace. */
|
||||
prof_bt_t *bt;
|
||||
|
||||
/* Protects nlimbo, cnt_merged, and cnts_ql. */
|
||||
malloc_mutex_t *lock;
|
||||
|
||||
/*
|
||||
* Number of threads that currently cause this ctx to be in a state of
|
||||
* limbo due to one of:
|
||||
* - Initializing per thread counters associated with this ctx.
|
||||
* - Preparing to destroy this ctx.
|
||||
* - Dumping a heap profile that includes this ctx.
|
||||
* nlimbo must be 1 (single destroyer) in order to safely destroy the
|
||||
* ctx.
|
||||
*/
|
||||
unsigned nlimbo;
|
||||
|
||||
/* Temporary storage for summation during dump. */
|
||||
prof_cnt_t cnt_summed;
|
||||
|
||||
/* When threads exit, they merge their stats into cnt_merged. */
|
||||
prof_cnt_t cnt_merged;
|
||||
|
||||
/*
|
||||
* List of profile counters, one for each thread that has allocated in
|
||||
* this context.
|
||||
*/
|
||||
ql_head(prof_thr_cnt_t) cnts_ql;
|
||||
|
||||
/* Linkage for list of contexts to be dumped. */
|
||||
ql_elm(prof_ctx_t) dump_link;
|
||||
};
|
||||
typedef ql_head(prof_ctx_t) prof_ctx_list_t;
|
||||
|
||||
struct prof_tdata_s {
|
||||
/*
|
||||
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
|
||||
* cache of backtraces, with associated thread-specific prof_thr_cnt_t
|
||||
* objects. Other threads may read the prof_thr_cnt_t contents, but no
|
||||
* others will ever write them.
|
||||
*
|
||||
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
|
||||
* counter data into the associated prof_ctx_t objects, and unlink/free
|
||||
* the prof_thr_cnt_t objects.
|
||||
*/
|
||||
ckh_t bt2cnt;
|
||||
|
||||
/* LRU for contents of bt2cnt. */
|
||||
ql_head(prof_thr_cnt_t) lru_ql;
|
||||
|
||||
/* Backtrace vector, used for calls to prof_backtrace(). */
|
||||
void **vec;
|
||||
|
||||
/* Sampling state. */
|
||||
uint64_t prng_state;
|
||||
uint64_t threshold;
|
||||
uint64_t accum;
|
||||
|
||||
/* State used to avoid dumping while operating on prof internals. */
|
||||
bool enq;
|
||||
bool enq_idump;
|
||||
bool enq_gdump;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
extern bool opt_prof;
|
||||
/*
|
||||
* Even if opt_prof is true, sampling can be temporarily disabled by setting
|
||||
* opt_prof_active to false. No locking is used when updating opt_prof_active,
|
||||
* so there are no guarantees regarding how long it will take for all threads
|
||||
* to notice state changes.
|
||||
*/
|
||||
extern bool opt_prof_active;
|
||||
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
|
||||
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
|
||||
extern bool opt_prof_gdump; /* High-water memory dumping. */
|
||||
extern bool opt_prof_final; /* Final profile dumping. */
|
||||
extern bool opt_prof_leak; /* Dump leak summary at exit. */
|
||||
extern bool opt_prof_accum; /* Report cumulative bytes. */
|
||||
extern char opt_prof_prefix[
|
||||
/* Minimize memory bloat for non-prof builds. */
|
||||
#ifdef JEMALLOC_PROF
|
||||
PATH_MAX +
|
||||
#endif
|
||||
1];
|
||||
|
||||
/*
|
||||
* Profile dump interval, measured in bytes allocated. Each arena triggers a
|
||||
* profile dump when it reaches this threshold. The effect is that the
|
||||
* interval between profile dumps averages prof_interval, though the actual
|
||||
* interval between dumps will tend to be sporadic, and the interval will be a
|
||||
* maximum of approximately (prof_interval * narenas).
|
||||
*/
|
||||
extern uint64_t prof_interval;
|
||||
|
||||
/*
|
||||
* If true, promote small sampled objects to large objects, since small run
|
||||
* headers do not have embedded profile context pointers.
|
||||
*/
|
||||
extern bool prof_promote;
|
||||
|
||||
void bt_init(prof_bt_t *bt, void **vec);
|
||||
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
|
||||
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
|
||||
#ifdef JEMALLOC_JET
|
||||
size_t prof_bt_count(void);
|
||||
typedef int (prof_dump_open_t)(bool, const char *);
|
||||
extern prof_dump_open_t *prof_dump_open;
|
||||
#endif
|
||||
void prof_idump(void);
|
||||
bool prof_mdump(const char *filename);
|
||||
void prof_gdump(void);
|
||||
prof_tdata_t *prof_tdata_init(void);
|
||||
void prof_tdata_cleanup(void *arg);
|
||||
void prof_boot0(void);
|
||||
void prof_boot1(void);
|
||||
bool prof_boot2(void);
|
||||
void prof_prefork(void);
|
||||
void prof_postfork_parent(void);
|
||||
void prof_postfork_child(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
|
||||
prof_tdata_t *prof_tdata; \
|
||||
prof_bt_t bt; \
|
||||
\
|
||||
assert(size == s2u(size)); \
|
||||
\
|
||||
prof_tdata = prof_tdata_get(true); \
|
||||
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
|
||||
if (prof_tdata != NULL) \
|
||||
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
|
||||
else \
|
||||
ret = NULL; \
|
||||
break; \
|
||||
} \
|
||||
\
|
||||
if (opt_prof_active == false) { \
|
||||
/* Sampling is currently inactive, so avoid sampling. */\
|
||||
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
|
||||
} else if (opt_lg_prof_sample == 0) { \
|
||||
/* Don't bother with sampling logic, since sampling */\
|
||||
/* interval is 1. */\
|
||||
bt_init(&bt, prof_tdata->vec); \
|
||||
prof_backtrace(&bt, nignore); \
|
||||
ret = prof_lookup(&bt); \
|
||||
} else { \
|
||||
if (prof_tdata->threshold == 0) { \
|
||||
/* Initialize. Seed the prng differently for */\
|
||||
/* each thread. */\
|
||||
prof_tdata->prng_state = \
|
||||
(uint64_t)(uintptr_t)&size; \
|
||||
prof_sample_threshold_update(prof_tdata); \
|
||||
} \
|
||||
\
|
||||
/* Determine whether to capture a backtrace based on */\
|
||||
/* whether size is enough for prof_accum to reach */\
|
||||
/* prof_tdata->threshold. However, delay updating */\
|
||||
/* these variables until prof_{m,re}alloc(), because */\
|
||||
/* we don't know for sure that the allocation will */\
|
||||
/* succeed. */\
|
||||
/* */\
|
||||
/* Use subtraction rather than addition to avoid */\
|
||||
/* potential integer overflow. */\
|
||||
if (size >= prof_tdata->threshold - \
|
||||
prof_tdata->accum) { \
|
||||
bt_init(&bt, prof_tdata->vec); \
|
||||
prof_backtrace(&bt, nignore); \
|
||||
ret = prof_lookup(&bt); \
|
||||
} else \
|
||||
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
|
||||
} \
|
||||
} while (0)
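/*
 * Usage sketch (hypothetical caller, not from this header): PROF_ALLOC_PREP()
 * is a statement-like macro that writes its result into a caller-provided
 * lvalue; usize is assumed to already satisfy usize == s2u(usize).
 */
static inline prof_thr_cnt_t *
prof_alloc_prep_sketch(size_t usize)
{
	prof_thr_cnt_t *cnt;

	PROF_ALLOC_PREP(1, usize, cnt);
	/*
	 * cnt is NULL on OOM, (prof_thr_cnt_t *)1U when this allocation is
	 * not sampled, or a real counter object when it is sampled.
	 */
	return (cnt);
}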
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
|
||||
|
||||
prof_tdata_t *prof_tdata_get(bool create);
|
||||
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
|
||||
prof_ctx_t *prof_ctx_get(const void *ptr);
|
||||
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
|
||||
bool prof_sample_accum_update(size_t size);
|
||||
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
|
||||
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
|
||||
size_t old_usize, prof_ctx_t *old_ctx);
|
||||
void prof_free(const void *ptr, size_t size);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
|
||||
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
|
||||
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
|
||||
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
|
||||
prof_tdata_cleanup)
|
||||
|
||||
JEMALLOC_INLINE prof_tdata_t *
|
||||
prof_tdata_get(bool create)
|
||||
{
|
||||
prof_tdata_t *prof_tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
prof_tdata = *prof_tdata_tsd_get();
|
||||
if (create && prof_tdata == NULL)
|
||||
prof_tdata = prof_tdata_init();
|
||||
|
||||
return (prof_tdata);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
|
||||
{
|
||||
/*
|
||||
* The body of this function is compiled out unless heap profiling is
|
||||
* enabled, so that it is possible to compile jemalloc with floating
|
||||
* point support completely disabled. Avoiding floating point code is
|
||||
* important on memory-constrained systems, but it also enables a
|
||||
* workaround for versions of glibc that don't properly save/restore
|
||||
* floating point registers during dynamic lazy symbol loading (which
|
||||
* internally calls into whatever malloc implementation happens to be
|
||||
* integrated into the application). Note that some compilers (e.g.
|
||||
* gcc 4.8) may use floating point registers for fast memory moves, so
|
||||
* jemalloc must be compiled with such optimizations disabled (e.g.
|
||||
* -mno-sse) in order for the workaround to be complete.
|
||||
*/
|
||||
#ifdef JEMALLOC_PROF
|
||||
uint64_t r;
|
||||
double u;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
/*
|
||||
* Compute sample threshold as a geometrically distributed random
|
||||
* variable with mean (2^opt_lg_prof_sample).
|
||||
*
|
||||
* __ __
|
||||
* | log(u) | 1
|
||||
* prof_tdata->threshold = | -------- |, where p = -------------------
|
||||
* | log(1-p) | opt_lg_prof_sample
|
||||
* 2
|
||||
*
|
||||
* For more information on the math, see:
|
||||
*
|
||||
* Non-Uniform Random Variate Generation
|
||||
* Luc Devroye
|
||||
* Springer-Verlag, New York, 1986
|
||||
* pp 500
|
||||
* (http://luc.devroye.org/rnbookindex.html)
|
||||
*/
|
||||
prng64(r, 53, prof_tdata->prng_state,
|
||||
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
|
||||
u = (double)r * (1.0/9007199254740992.0L);
|
||||
prof_tdata->threshold = (uint64_t)(log(u) /
|
||||
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
|
||||
+ (uint64_t)1U;
|
||||
#endif
|
||||
}
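/*
 * Standalone sketch (not part of jemalloc, guarded out of the header build):
 * checks the inverse-CDF draw used above.  drand48() stands in for the
 * prng64()-based uniform variate; with lg_sample == 19 the empirical mean
 * threshold lands near 2^19 bytes.  Compile separately, e.g. cc demo.c -lm.
 */
#if 0
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const unsigned lg_sample = 19;
	const double p = 1.0 / (double)((uint64_t)1 << lg_sample);
	const unsigned n = 1000000;
	double sum = 0.0;
	unsigned i;

	for (i = 0; i < n; i++) {
		double u = 1.0 - drand48();	/* Uniform in (0, 1]. */
		sum += (double)((uint64_t)(log(u) / log(1.0 - p)) + 1);
	}
	printf("mean threshold: %.0f (expect ~%llu)\n", sum / n,
	    (unsigned long long)((uint64_t)1 << lg_sample));
	return (0);
}
#endif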
|
||||
|
||||
JEMALLOC_INLINE prof_ctx_t *
|
||||
prof_ctx_get(const void *ptr)
|
||||
{
|
||||
prof_ctx_t *ret;
|
||||
arena_chunk_t *chunk;
|
||||
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk != ptr) {
|
||||
/* Region. */
|
||||
ret = arena_prof_ctx_get(ptr);
|
||||
} else
|
||||
ret = huge_prof_ctx_get(ptr);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
|
||||
{
|
||||
arena_chunk_t *chunk;
|
||||
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk != ptr) {
|
||||
/* Region. */
|
||||
arena_prof_ctx_set(ptr, usize, ctx);
|
||||
} else
|
||||
huge_prof_ctx_set(ptr, ctx);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
prof_sample_accum_update(size_t size)
|
||||
{
|
||||
prof_tdata_t *prof_tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
/* Sampling logic is unnecessary if the interval is 1. */
|
||||
assert(opt_lg_prof_sample != 0);
|
||||
|
||||
prof_tdata = prof_tdata_get(false);
|
||||
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
|
||||
return (true);
|
||||
|
||||
/* Take care to avoid integer overflow. */
|
||||
if (size >= prof_tdata->threshold - prof_tdata->accum) {
|
||||
prof_tdata->accum -= (prof_tdata->threshold - size);
|
||||
/* Compute new sample threshold. */
|
||||
prof_sample_threshold_update(prof_tdata);
|
||||
while (prof_tdata->accum >= prof_tdata->threshold) {
|
||||
prof_tdata->accum -= prof_tdata->threshold;
|
||||
prof_sample_threshold_update(prof_tdata);
|
||||
}
|
||||
return (false);
|
||||
} else {
|
||||
prof_tdata->accum += size;
|
||||
return (true);
|
||||
}
|
||||
}
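/*
 * Sketch (not from the original source): why the comparison above is written
 * with subtraction.  Given the invariant accum < threshold, the expression
 * threshold - accum cannot underflow, whereas the mathematically equivalent
 * accum + size >= threshold could wrap past UINT64_MAX and report false.
 */
static inline bool
prof_would_cross_threshold_sketch(uint64_t accum, uint64_t threshold,
    size_t size)
{

	assert(accum < threshold);
	return (size >= threshold - accum);
}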
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
|
||||
{
|
||||
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(usize == isalloc(ptr, true));
|
||||
|
||||
if (opt_lg_prof_sample != 0) {
|
||||
if (prof_sample_accum_update(usize)) {
|
||||
/*
|
||||
* Don't sample. For malloc()-like allocation, it is
|
||||
* always possible to tell in advance how large an
|
||||
* object's usable size will be, so there should never
|
||||
* be a difference between the usize passed to
|
||||
* PROF_ALLOC_PREP() and prof_malloc().
|
||||
*/
|
||||
assert((uintptr_t)cnt == (uintptr_t)1U);
|
||||
}
|
||||
}
|
||||
|
||||
if ((uintptr_t)cnt > (uintptr_t)1U) {
|
||||
prof_ctx_set(ptr, usize, cnt->ctx);
|
||||
|
||||
cnt->epoch++;
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
cnt->cnts.curobjs++;
|
||||
cnt->cnts.curbytes += usize;
|
||||
if (opt_prof_accum) {
|
||||
cnt->cnts.accumobjs++;
|
||||
cnt->cnts.accumbytes += usize;
|
||||
}
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
cnt->epoch++;
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
} else
|
||||
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
|
||||
size_t old_usize, prof_ctx_t *old_ctx)
|
||||
{
|
||||
prof_thr_cnt_t *told_cnt;
|
||||
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
|
||||
|
||||
if (ptr != NULL) {
|
||||
assert(usize == isalloc(ptr, true));
|
||||
if (opt_lg_prof_sample != 0) {
|
||||
if (prof_sample_accum_update(usize)) {
|
||||
/*
|
||||
* Don't sample. The usize passed to
|
||||
* PROF_ALLOC_PREP() was larger than what
|
||||
* actually got allocated, so a backtrace was
|
||||
* captured for this allocation, even though
|
||||
* its actual usize was insufficient to cross
|
||||
* the sample threshold.
|
||||
*/
|
||||
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((uintptr_t)old_ctx > (uintptr_t)1U) {
|
||||
told_cnt = prof_lookup(old_ctx->bt);
|
||||
if (told_cnt == NULL) {
|
||||
/*
|
||||
* It's too late to propagate OOM for this realloc(),
|
||||
* so operate directly on old_cnt->ctx->cnt_merged.
|
||||
*/
|
||||
malloc_mutex_lock(old_ctx->lock);
|
||||
old_ctx->cnt_merged.curobjs--;
|
||||
old_ctx->cnt_merged.curbytes -= old_usize;
|
||||
malloc_mutex_unlock(old_ctx->lock);
|
||||
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
|
||||
}
|
||||
} else
|
||||
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
|
||||
|
||||
if ((uintptr_t)told_cnt > (uintptr_t)1U)
|
||||
told_cnt->epoch++;
|
||||
if ((uintptr_t)cnt > (uintptr_t)1U) {
|
||||
prof_ctx_set(ptr, usize, cnt->ctx);
|
||||
cnt->epoch++;
|
||||
} else if (ptr != NULL)
|
||||
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
|
||||
told_cnt->cnts.curobjs--;
|
||||
told_cnt->cnts.curbytes -= old_usize;
|
||||
}
|
||||
if ((uintptr_t)cnt > (uintptr_t)1U) {
|
||||
cnt->cnts.curobjs++;
|
||||
cnt->cnts.curbytes += usize;
|
||||
if (opt_prof_accum) {
|
||||
cnt->cnts.accumobjs++;
|
||||
cnt->cnts.accumbytes += usize;
|
||||
}
|
||||
}
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
if ((uintptr_t)told_cnt > (uintptr_t)1U)
|
||||
told_cnt->epoch++;
|
||||
if ((uintptr_t)cnt > (uintptr_t)1U)
|
||||
cnt->epoch++;
|
||||
/*********/
|
||||
mb_write(); /* Not strictly necessary. */
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
prof_free(const void *ptr, size_t size)
|
||||
{
|
||||
prof_ctx_t *ctx = prof_ctx_get(ptr);
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
if ((uintptr_t)ctx > (uintptr_t)1) {
|
||||
prof_thr_cnt_t *tcnt;
|
||||
assert(size == isalloc(ptr, true));
|
||||
tcnt = prof_lookup(ctx->bt);
|
||||
|
||||
if (tcnt != NULL) {
|
||||
tcnt->epoch++;
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
tcnt->cnts.curobjs--;
|
||||
tcnt->cnts.curbytes -= size;
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
tcnt->epoch++;
|
||||
/*********/
|
||||
mb_write();
|
||||
/*********/
|
||||
} else {
|
||||
/*
|
||||
* OOM during free() cannot be propagated, so operate
|
||||
* directly on cnt->ctx->cnt_merged.
|
||||
*/
|
||||
malloc_mutex_lock(ctx->lock);
|
||||
ctx->cnt_merged.curobjs--;
|
||||
ctx->cnt_merged.curbytes -= size;
|
||||
malloc_mutex_unlock(ctx->lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
105
deps/jemalloc/include/jemalloc/internal/prof_externs.h
vendored
Normal file
105
deps/jemalloc/include/jemalloc/internal/prof_externs.h
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
extern malloc_mutex_t bt2gctx_mtx;
|
||||
|
||||
extern bool opt_prof;
|
||||
extern bool opt_prof_active;
|
||||
extern bool opt_prof_thread_active_init;
|
||||
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
|
||||
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
|
||||
extern bool opt_prof_gdump; /* High-water memory dumping. */
|
||||
extern bool opt_prof_final; /* Final profile dumping. */
|
||||
extern bool opt_prof_leak; /* Dump leak summary at exit. */
|
||||
extern bool opt_prof_accum; /* Report cumulative bytes. */
|
||||
extern bool opt_prof_log; /* Turn logging on at boot. */
|
||||
extern char opt_prof_prefix[
|
||||
/* Minimize memory bloat for non-prof builds. */
|
||||
#ifdef JEMALLOC_PROF
|
||||
PATH_MAX +
|
||||
#endif
|
||||
1];
|
||||
|
||||
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
|
||||
extern bool prof_active;
|
||||
|
||||
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
|
||||
extern bool prof_gdump_val;
|
||||
|
||||
/*
|
||||
* Profile dump interval, measured in bytes allocated. Each arena triggers a
|
||||
* profile dump when it reaches this threshold. The effect is that the
|
||||
* interval between profile dumps averages prof_interval, though the actual
|
||||
* interval between dumps will tend to be sporadic, and the interval will be a
|
||||
* maximum of approximately (prof_interval * narenas).
|
||||
*/
|
||||
extern uint64_t prof_interval;
|
||||
|
||||
/*
|
||||
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
|
||||
* resets.
|
||||
*/
|
||||
extern size_t lg_prof_sample;
|
||||
|
||||
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
|
||||
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
|
||||
prof_tctx_t *tctx);
|
||||
void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
|
||||
prof_tctx_t *tctx);
|
||||
void bt_init(prof_bt_t *bt, void **vec);
|
||||
void prof_backtrace(prof_bt_t *bt);
|
||||
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
|
||||
#ifdef JEMALLOC_JET
|
||||
size_t prof_tdata_count(void);
|
||||
size_t prof_bt_count(void);
|
||||
#endif
|
||||
typedef int (prof_dump_open_t)(bool, const char *);
|
||||
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
|
||||
|
||||
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
|
||||
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
|
||||
#ifdef JEMALLOC_JET
|
||||
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
|
||||
uint64_t *accumbytes);
|
||||
#endif
|
||||
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
|
||||
void prof_idump(tsdn_t *tsdn);
|
||||
bool prof_mdump(tsd_t *tsd, const char *filename);
|
||||
void prof_gdump(tsdn_t *tsdn);
|
||||
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
|
||||
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
|
||||
void prof_reset(tsd_t *tsd, size_t lg_sample);
|
||||
void prof_tdata_cleanup(tsd_t *tsd);
|
||||
bool prof_active_get(tsdn_t *tsdn);
|
||||
bool prof_active_set(tsdn_t *tsdn, bool active);
|
||||
const char *prof_thread_name_get(tsd_t *tsd);
|
||||
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
|
||||
bool prof_thread_active_get(tsd_t *tsd);
|
||||
bool prof_thread_active_set(tsd_t *tsd, bool active);
|
||||
bool prof_thread_active_init_get(tsdn_t *tsdn);
|
||||
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
|
||||
bool prof_gdump_get(tsdn_t *tsdn);
|
||||
bool prof_gdump_set(tsdn_t *tsdn, bool active);
|
||||
void prof_boot0(void);
|
||||
void prof_boot1(void);
|
||||
bool prof_boot2(tsd_t *tsd);
|
||||
void prof_prefork0(tsdn_t *tsdn);
|
||||
void prof_prefork1(tsdn_t *tsdn);
|
||||
void prof_postfork_parent(tsdn_t *tsdn);
|
||||
void prof_postfork_child(tsdn_t *tsdn);
|
||||
void prof_sample_threshold_update(prof_tdata_t *tdata);
|
||||
|
||||
bool prof_log_start(tsdn_t *tsdn, const char *filename);
|
||||
bool prof_log_stop(tsdn_t *tsdn);
|
||||
#ifdef JEMALLOC_JET
|
||||
size_t prof_log_bt_count(void);
|
||||
size_t prof_log_alloc_count(void);
|
||||
size_t prof_log_thr_count(void);
|
||||
bool prof_log_is_logging(void);
|
||||
bool prof_log_rep_check(void);
|
||||
void prof_log_dummy_set(bool new_value);
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
|
||||
85
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
vendored
Normal file
85
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
|
||||
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
|
||||
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
static inline bool
|
||||
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
|
||||
uint64_t accumbytes) {
|
||||
cassert(config_prof);
|
||||
|
||||
bool overflow;
|
||||
uint64_t a0, a1;
|
||||
|
||||
/*
|
||||
* If the application allocates fast enough (and/or if idump is slow
|
||||
* enough), extreme overflow here (a1 >= prof_interval * 2) can cause
|
||||
* idump trigger coalescing. This is an intentional mechanism that
|
||||
* avoids rate-limiting allocation.
|
||||
*/
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
|
||||
do {
|
||||
a1 = a0 + accumbytes;
|
||||
assert(a1 >= a0);
|
||||
overflow = (a1 >= prof_interval);
|
||||
if (overflow) {
|
||||
a1 %= prof_interval;
|
||||
}
|
||||
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
|
||||
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
|
||||
#else
|
||||
malloc_mutex_lock(tsdn, &prof_accum->mtx);
|
||||
a0 = prof_accum->accumbytes;
|
||||
a1 = a0 + accumbytes;
|
||||
overflow = (a1 >= prof_interval);
|
||||
if (overflow) {
|
||||
a1 %= prof_interval;
|
||||
}
|
||||
prof_accum->accumbytes = a1;
|
||||
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
|
||||
#endif
|
||||
return overflow;
|
||||
}
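/*
 * Usage sketch (hypothetical caller, not part of this header): the overflow
 * flag returned above is how interval-triggered dumps get requested.
 */
static inline void
prof_accum_add_and_maybe_idump_sketch(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes) {
	if (prof_accum_add(tsdn, prof_accum, accumbytes)) {
		prof_idump(tsdn);
	}
}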
|
||||
|
||||
static inline void
|
||||
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
|
||||
size_t usize) {
|
||||
cassert(config_prof);
|
||||
|
||||
/*
|
||||
* Cancel out as much of the excessive prof_accumbytes increase as
|
||||
* possible without underflowing. Interval-triggered dumps occur
|
||||
* slightly more often than intended as a result of incomplete
|
||||
* canceling.
|
||||
*/
|
||||
uint64_t a0, a1;
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
|
||||
do {
|
||||
a1 = (a0 >= SC_LARGE_MINCLASS - usize)
|
||||
? a0 - (SC_LARGE_MINCLASS - usize) : 0;
|
||||
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
|
||||
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
|
||||
#else
|
||||
malloc_mutex_lock(tsdn, &prof_accum->mtx);
|
||||
a0 = prof_accum->accumbytes;
|
||||
a1 = (a0 >= SC_LARGE_MINCLASS - usize)
|
||||
? a0 - (SC_LARGE_MINCLASS - usize) : 0;
|
||||
prof_accum->accumbytes = a1;
|
||||
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_active_get_unlocked(void) {
|
||||
/*
|
||||
* Even if opt_prof is true, sampling can be temporarily disabled by
|
||||
* setting prof_active to false. No locking is used when reading
|
||||
* prof_active in the fast path, so there are no guarantees regarding
|
||||
* how long it will take for all threads to notice state changes.
|
||||
*/
|
||||
return prof_active;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
|
||||
250
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h
vendored
Normal file
250
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
|
||||
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
|
||||
|
||||
#include "jemalloc/internal/safety_check.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_gdump_get_unlocked(void) {
|
||||
/*
|
||||
* No locking is used when reading prof_gdump_val in the fast path, so
|
||||
* there are no guarantees regarding how long it will take for all
|
||||
* threads to notice state changes.
|
||||
*/
|
||||
return prof_gdump_val;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
|
||||
prof_tdata_get(tsd_t *tsd, bool create) {
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
tdata = tsd_prof_tdata_get(tsd);
|
||||
if (create) {
|
||||
if (unlikely(tdata == NULL)) {
|
||||
if (tsd_nominal(tsd)) {
|
||||
tdata = prof_tdata_init(tsd);
|
||||
tsd_prof_tdata_set(tsd, tdata);
|
||||
}
|
||||
} else if (unlikely(tdata->expired)) {
|
||||
tdata = prof_tdata_reinit(tsd, tdata);
|
||||
tsd_prof_tdata_set(tsd, tdata);
|
||||
}
|
||||
assert(tdata == NULL || tdata->attached);
|
||||
}
|
||||
|
||||
return tdata;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
|
||||
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
arena_prof_tctx_reset(tsdn, ptr, tctx);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE nstime_t
|
||||
prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
|
||||
nstime_t t) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
||||
arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
|
||||
ssize_t check = update ? 0 : usize;
|
||||
|
||||
int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
|
||||
if (update) {
|
||||
bytes_until_sample -= usize;
|
||||
if (tsd_nominal(tsd)) {
|
||||
tsd_bytes_until_sample_set(tsd, bytes_until_sample);
|
||||
}
|
||||
}
|
||||
if (likely(bytes_until_sample >= check)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
|
||||
prof_tdata_t **tdata_out) {
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
/* Fastpath: no need to load tdata */
|
||||
if (likely(prof_sample_check(tsd, usize, update))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool booted = tsd_prof_tdata_get(tsd);
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
|
||||
tdata = NULL;
|
||||
}
|
||||
|
||||
if (tdata_out != NULL) {
|
||||
*tdata_out = tdata;
|
||||
}
|
||||
|
||||
if (unlikely(tdata == NULL)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this was the first creation of tdata, then
|
||||
* prof_tdata_get() reset bytes_until_sample, so decrement and
|
||||
 * check it again.
|
||||
*/
|
||||
if (!booted && prof_sample_check(tsd, usize, update)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (tsd_reentrancy_level_get(tsd) > 0) {
|
||||
return true;
|
||||
}
|
||||
/* Compute new sample threshold. */
|
||||
if (update) {
|
||||
prof_sample_threshold_update(tdata);
|
||||
}
|
||||
return !tdata->active;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
|
||||
prof_tctx_t *ret;
|
||||
prof_tdata_t *tdata;
|
||||
prof_bt_t bt;
|
||||
|
||||
assert(usize == sz_s2u(usize));
|
||||
|
||||
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
|
||||
&tdata))) {
|
||||
ret = (prof_tctx_t *)(uintptr_t)1U;
|
||||
} else {
|
||||
bt_init(&bt, tdata->vec);
|
||||
prof_backtrace(&bt);
|
||||
ret = prof_lookup(tsd, &bt);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
|
||||
prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
assert(usize == isalloc(tsdn, ptr));
|
||||
|
||||
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
|
||||
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
|
||||
} else {
|
||||
prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
|
||||
(prof_tctx_t *)(uintptr_t)1U);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
|
||||
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
|
||||
prof_tctx_t *old_tctx) {
|
||||
bool sampled, old_sampled, moved;
|
||||
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
|
||||
|
||||
if (prof_active && !updated && ptr != NULL) {
|
||||
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
|
||||
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
|
||||
/*
|
||||
* Don't sample. The usize passed to prof_alloc_prep()
|
||||
* was larger than what actually got allocated, so a
|
||||
* backtrace was captured for this allocation, even
|
||||
* though its actual usize was insufficient to cross the
|
||||
* sample threshold.
|
||||
*/
|
||||
prof_alloc_rollback(tsd, tctx, true);
|
||||
tctx = (prof_tctx_t *)(uintptr_t)1U;
|
||||
}
|
||||
}
|
||||
|
||||
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
|
||||
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
|
||||
moved = (ptr != old_ptr);
|
||||
|
||||
if (unlikely(sampled)) {
|
||||
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
|
||||
} else if (moved) {
|
||||
prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
|
||||
(prof_tctx_t *)(uintptr_t)1U);
|
||||
} else if (unlikely(old_sampled)) {
|
||||
/*
|
||||
* prof_tctx_set() would work for the !moved case as well, but
|
||||
* prof_tctx_reset() is slightly cheaper, and the proper thing
|
||||
* to do here in the presence of explicit knowledge re: moved
|
||||
* state.
|
||||
*/
|
||||
prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
|
||||
} else {
|
||||
assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
|
||||
(uintptr_t)1U);
|
||||
}
|
||||
|
||||
/*
|
||||
* The prof_free_sampled_object() call must come after the
|
||||
* prof_malloc_sample_object() call, because tctx and old_tctx may be
|
||||
* the same, in which case reversing the call order could cause the tctx
|
||||
* to be prematurely destroyed as a side effect of momentarily zeroed
|
||||
* counters.
|
||||
*/
|
||||
if (unlikely(old_sampled)) {
|
||||
prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
|
||||
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
|
||||
|
||||
cassert(config_prof);
|
||||
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
|
||||
|
||||
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
|
||||
prof_free_sampled_object(tsd, ptr, usize, tctx);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
|
||||
200
deps/jemalloc/include/jemalloc/internal/prof_structs.h
vendored
Normal file
200
deps/jemalloc/include/jemalloc/internal/prof_structs.h
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/ckh.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/prng.h"
|
||||
#include "jemalloc/internal/rb.h"
|
||||
|
||||
struct prof_bt_s {
|
||||
/* Backtrace, stored as len program counters. */
|
||||
void **vec;
|
||||
unsigned len;
|
||||
};
|
||||
|
||||
#ifdef JEMALLOC_PROF_LIBGCC
|
||||
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
|
||||
typedef struct {
|
||||
prof_bt_t *bt;
|
||||
unsigned max;
|
||||
} prof_unwind_data_t;
|
||||
#endif
|
||||
|
||||
struct prof_accum_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
uint64_t accumbytes;
|
||||
#else
|
||||
atomic_u64_t accumbytes;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct prof_cnt_s {
|
||||
/* Profiling counters. */
|
||||
uint64_t curobjs;
|
||||
uint64_t curbytes;
|
||||
uint64_t accumobjs;
|
||||
uint64_t accumbytes;
|
||||
};
|
||||
|
||||
typedef enum {
|
||||
prof_tctx_state_initializing,
|
||||
prof_tctx_state_nominal,
|
||||
prof_tctx_state_dumping,
|
||||
prof_tctx_state_purgatory /* Dumper must finish destroying. */
|
||||
} prof_tctx_state_t;
|
||||
|
||||
struct prof_tctx_s {
|
||||
/* Thread data for thread that performed the allocation. */
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
/*
|
||||
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
|
||||
* defunct during teardown.
|
||||
*/
|
||||
uint64_t thr_uid;
|
||||
uint64_t thr_discrim;
|
||||
|
||||
/* Profiling counters, protected by tdata->lock. */
|
||||
prof_cnt_t cnts;
|
||||
|
||||
/* Associated global context. */
|
||||
prof_gctx_t *gctx;
|
||||
|
||||
/*
|
||||
* UID that distinguishes multiple tctx's created by the same thread,
|
||||
* but coexisting in gctx->tctxs. There are two ways that such
|
||||
* coexistence can occur:
|
||||
* - A dumper thread can cause a tctx to be retained in the purgatory
|
||||
* state.
|
||||
* - Although a single "producer" thread must create all tctx's which
|
||||
* share the same thr_uid, multiple "consumers" can each concurrently
|
||||
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
|
||||
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
|
||||
* threshold can be hit again before the first consumer finishes
|
||||
* executing prof_tctx_destroy().
|
||||
*/
|
||||
uint64_t tctx_uid;
|
||||
|
||||
/* Linkage into gctx's tctxs. */
|
||||
rb_node(prof_tctx_t) tctx_link;
|
||||
|
||||
/*
|
||||
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
|
||||
* sample vs destroy race.
|
||||
*/
|
||||
bool prepared;
|
||||
|
||||
/* Current dump-related state, protected by gctx->lock. */
|
||||
prof_tctx_state_t state;
|
||||
|
||||
/*
|
||||
* Copy of cnts snapshotted during early dump phase, protected by
|
||||
* dump_mtx.
|
||||
*/
|
||||
prof_cnt_t dump_cnts;
|
||||
};
|
||||
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
|
||||
|
||||
struct prof_gctx_s {
|
||||
/* Protects nlimbo, cnt_summed, and tctxs. */
|
||||
malloc_mutex_t *lock;
|
||||
|
||||
/*
|
||||
* Number of threads that currently cause this gctx to be in a state of
|
||||
* limbo due to one of:
|
||||
* - Initializing this gctx.
|
||||
* - Initializing per thread counters associated with this gctx.
|
||||
* - Preparing to destroy this gctx.
|
||||
* - Dumping a heap profile that includes this gctx.
|
||||
* nlimbo must be 1 (single destroyer) in order to safely destroy the
|
||||
* gctx.
|
||||
*/
|
||||
unsigned nlimbo;
|
||||
|
||||
/*
|
||||
* Tree of profile counters, one for each thread that has allocated in
|
||||
* this context.
|
||||
*/
|
||||
prof_tctx_tree_t tctxs;
|
||||
|
||||
/* Linkage for tree of contexts to be dumped. */
|
||||
rb_node(prof_gctx_t) dump_link;
|
||||
|
||||
/* Temporary storage for summation during dump. */
|
||||
prof_cnt_t cnt_summed;
|
||||
|
||||
/* Associated backtrace. */
|
||||
prof_bt_t bt;
|
||||
|
||||
/* Backtrace vector, variable size, referred to by bt. */
|
||||
void *vec[1];
|
||||
};
|
||||
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
|
||||
|
||||
struct prof_tdata_s {
|
||||
malloc_mutex_t *lock;
|
||||
|
||||
/* Monotonically increasing unique thread identifier. */
|
||||
uint64_t thr_uid;
|
||||
|
||||
/*
|
||||
* Monotonically increasing discriminator among tdata structures
|
||||
* associated with the same thr_uid.
|
||||
*/
|
||||
uint64_t thr_discrim;
|
||||
|
||||
/* Included in heap profile dumps if non-NULL. */
|
||||
char *thread_name;
|
||||
|
||||
bool attached;
|
||||
bool expired;
|
||||
|
||||
rb_node(prof_tdata_t) tdata_link;
|
||||
|
||||
/*
|
||||
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
|
||||
* necessary when incrementing this field, because only one thread ever
|
||||
* does so.
|
||||
*/
|
||||
uint64_t tctx_uid_next;
|
||||
|
||||
/*
|
||||
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
|
||||
* backtraces for which it has non-zero allocation/deallocation counters
|
||||
* associated with thread-specific prof_tctx_t objects. Other threads
|
||||
* may write to prof_tctx_t contents when freeing associated objects.
|
||||
*/
|
||||
ckh_t bt2tctx;
|
||||
|
||||
/* Sampling state. */
|
||||
uint64_t prng_state;
|
||||
|
||||
/* State used to avoid dumping while operating on prof internals. */
|
||||
bool enq;
|
||||
bool enq_idump;
|
||||
bool enq_gdump;
|
||||
|
||||
/*
|
||||
* Set to true during an early dump phase for tdata's which are
|
||||
* currently being dumped. New threads' tdata's have this initialized
|
||||
* to false so that they aren't accidentally included in later dump
|
||||
* phases.
|
||||
*/
|
||||
bool dumping;
|
||||
|
||||
/*
|
||||
* True if profiling is active for this tdata's thread
|
||||
* (thread.prof.active mallctl).
|
||||
*/
|
||||
bool active;
|
||||
|
||||
/* Temporary storage for summation during dump. */
|
||||
prof_cnt_t cnt_summed;
|
||||
|
||||
/* Backtrace vector, used for calls to prof_backtrace(). */
|
||||
void *vec[PROF_BT_MAX];
|
||||
};
|
||||
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
|
||||
56
deps/jemalloc/include/jemalloc/internal/prof_types.h
vendored
Normal file
56
deps/jemalloc/include/jemalloc/internal/prof_types.h
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_PROF_TYPES_H
|
||||
|
||||
typedef struct prof_bt_s prof_bt_t;
|
||||
typedef struct prof_accum_s prof_accum_t;
|
||||
typedef struct prof_cnt_s prof_cnt_t;
|
||||
typedef struct prof_tctx_s prof_tctx_t;
|
||||
typedef struct prof_gctx_s prof_gctx_t;
|
||||
typedef struct prof_tdata_s prof_tdata_t;
|
||||
|
||||
/* Option defaults. */
|
||||
#ifdef JEMALLOC_PROF
|
||||
# define PROF_PREFIX_DEFAULT "jeprof"
|
||||
#else
|
||||
# define PROF_PREFIX_DEFAULT ""
|
||||
#endif
|
||||
#define LG_PROF_SAMPLE_DEFAULT 19
|
||||
#define LG_PROF_INTERVAL_DEFAULT -1
|
||||
|
||||
/*
|
||||
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
|
||||
* is based on __builtin_return_address() necessarily has a hard-coded number
|
||||
* of backtrace frame handlers, and should be kept in sync with this setting.
|
||||
*/
|
||||
#define PROF_BT_MAX 128
|
||||
|
||||
/* Initial hash table size. */
|
||||
#define PROF_CKH_MINITEMS 64
|
||||
|
||||
/* Size of memory buffer to use when writing dump files. */
|
||||
#define PROF_DUMP_BUFSIZE 65536
|
||||
|
||||
/* Size of stack-allocated buffer used by prof_printf(). */
|
||||
#define PROF_PRINTF_BUFSIZE 128
|
||||
|
||||
/*
|
||||
* Number of mutexes shared among all gctx's. No space is allocated for these
|
||||
* unless profiling is enabled, so it's okay to over-provision.
|
||||
*/
|
||||
#define PROF_NCTX_LOCKS 1024
|
||||
|
||||
/*
|
||||
* Number of mutexes shared among all tdata's. No space is allocated for these
|
||||
* unless profiling is enabled, so it's okay to over-provision.
|
||||
*/
|
||||
#define PROF_NTDATA_LOCKS 256
|
||||
|
||||
/*
|
||||
* prof_tdata pointers close to NULL are used to encode state information that
|
||||
* is used for cleaning up during thread shutdown.
|
||||
*/
|
||||
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
|
||||
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
|
||||
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
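/*
 * Sketch (illustrative only, not part of the original header): how callers
 * distinguish a real tdata pointer from the near-NULL sentinels above.
 */
static inline bool
prof_tdata_is_real_sketch(prof_tdata_t *tdata) {
	return ((uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
}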
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
|
||||
88
deps/jemalloc/include/jemalloc/internal/ql.h
vendored
Normal file
88
deps/jemalloc/include/jemalloc/internal/ql.h
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
#ifndef JEMALLOC_INTERNAL_QL_H
|
||||
#define JEMALLOC_INTERNAL_QL_H
|
||||
|
||||
#include "jemalloc/internal/qr.h"
|
||||
|
||||
/* List definitions. */
|
||||
#define ql_head(a_type) \
|
||||
struct { \
|
||||
a_type *qlh_first; \
|
||||
}
|
||||
|
||||
#define ql_head_initializer(a_head) {NULL}
|
||||
|
||||
#define ql_elm(a_type) qr(a_type)
|
||||
|
||||
/* List functions. */
|
||||
#define ql_new(a_head) do { \
|
||||
(a_head)->qlh_first = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
|
||||
|
||||
#define ql_first(a_head) ((a_head)->qlh_first)
|
||||
|
||||
#define ql_last(a_head, a_field) \
|
||||
((ql_first(a_head) != NULL) \
|
||||
? qr_prev(ql_first(a_head), a_field) : NULL)
|
||||
|
||||
#define ql_next(a_head, a_elm, a_field) \
|
||||
((ql_last(a_head, a_field) != (a_elm)) \
|
||||
? qr_next((a_elm), a_field) : NULL)
|
||||
|
||||
#define ql_prev(a_head, a_elm, a_field) \
|
||||
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
|
||||
: NULL)
|
||||
|
||||
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
|
||||
qr_before_insert((a_qlelm), (a_elm), a_field); \
|
||||
if (ql_first(a_head) == (a_qlelm)) { \
|
||||
ql_first(a_head) = (a_elm); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ql_after_insert(a_qlelm, a_elm, a_field) \
|
||||
qr_after_insert((a_qlelm), (a_elm), a_field)
|
||||
|
||||
#define ql_head_insert(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) != NULL) { \
|
||||
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
|
||||
} \
|
||||
ql_first(a_head) = (a_elm); \
|
||||
} while (0)
|
||||
|
||||
#define ql_tail_insert(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) != NULL) { \
|
||||
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
|
||||
} \
|
||||
ql_first(a_head) = qr_next((a_elm), a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_remove(a_head, a_elm, a_field) do { \
|
||||
if (ql_first(a_head) == (a_elm)) { \
|
||||
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
|
||||
} \
|
||||
if (ql_first(a_head) != (a_elm)) { \
|
||||
qr_remove((a_elm), a_field); \
|
||||
} else { \
|
||||
ql_first(a_head) = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ql_head_remove(a_head, a_type, a_field) do { \
|
||||
a_type *t = ql_first(a_head); \
|
||||
ql_remove((a_head), t, a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_tail_remove(a_head, a_type, a_field) do { \
|
||||
a_type *t = ql_last(a_head, a_field); \
|
||||
ql_remove((a_head), t, a_field); \
|
||||
} while (0)
|
||||
|
||||
#define ql_foreach(a_var, a_head, a_field) \
|
||||
qr_foreach((a_var), ql_first(a_head), a_field)
|
||||
|
||||
#define ql_reverse_foreach(a_var, a_head, a_field) \
|
||||
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
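/*
 * Usage sketch (illustrative only, not part of ql.h): a list of widget_t
 * objects threaded through an embedded ql_elm link field.  widget_t and
 * widget_list_demo() are hypothetical names.
 */
typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;	/* Embedded linkage. */
};

static inline void
widget_list_demo(void) {
	ql_head(widget_t) head;
	widget_t a, b, *w;

	ql_new(&head);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	a.id = 1;
	b.id = 2;

	ql_tail_insert(&head, &a, link);	/* List: a */
	ql_tail_insert(&head, &b, link);	/* List: a, b */

	ql_foreach(w, &head, link) {
		(void)w->id;	/* Visits a (id 1), then b (id 2). */
	}

	ql_remove(&head, &a, link);		/* List: b */
}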
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_QL_H */
|
||||
72
deps/jemalloc/include/jemalloc/internal/qr.h
vendored
Normal file
72
deps/jemalloc/include/jemalloc/internal/qr.h
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
#ifndef JEMALLOC_INTERNAL_QR_H
|
||||
#define JEMALLOC_INTERNAL_QR_H
|
||||
|
||||
/* Ring definitions. */
|
||||
#define qr(a_type) \
|
||||
struct { \
|
||||
a_type *qre_next; \
|
||||
a_type *qre_prev; \
|
||||
}
|
||||
|
||||
/* Ring functions. */
|
||||
#define qr_new(a_qr, a_field) do { \
|
||||
(a_qr)->a_field.qre_next = (a_qr); \
|
||||
(a_qr)->a_field.qre_prev = (a_qr); \
|
||||
} while (0)
|
||||
|
||||
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
|
||||
|
||||
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
|
||||
|
||||
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
|
||||
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
|
||||
(a_qr)->a_field.qre_next = (a_qrelm); \
|
||||
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
|
||||
(a_qrelm)->a_field.qre_prev = (a_qr); \
|
||||
} while (0)
|
||||
|
||||
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
|
||||
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
|
||||
(a_qr)->a_field.qre_prev = (a_qrelm); \
|
||||
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
|
||||
(a_qrelm)->a_field.qre_next = (a_qr); \
|
||||
} while (0)
|
||||
|
||||
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
|
||||
a_type *t; \
|
||||
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
|
||||
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
|
||||
t = (a_qr_a)->a_field.qre_prev; \
|
||||
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
|
||||
(a_qr_b)->a_field.qre_prev = t; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
|
||||
* have two copies of the code.
|
||||
*/
|
||||
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
|
||||
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
|
||||
|
||||
#define qr_remove(a_qr, a_field) do { \
|
||||
(a_qr)->a_field.qre_prev->a_field.qre_next \
|
||||
= (a_qr)->a_field.qre_next; \
|
||||
(a_qr)->a_field.qre_next->a_field.qre_prev \
|
||||
= (a_qr)->a_field.qre_prev; \
|
||||
(a_qr)->a_field.qre_next = (a_qr); \
|
||||
(a_qr)->a_field.qre_prev = (a_qr); \
|
||||
} while (0)
|
||||
|
||||
#define qr_foreach(var, a_qr, a_field) \
|
||||
for ((var) = (a_qr); \
|
||||
(var) != NULL; \
|
||||
(var) = (((var)->a_field.qre_next != (a_qr)) \
|
||||
? (var)->a_field.qre_next : NULL))
|
||||
|
||||
#define qr_reverse_foreach(var, a_qr, a_field) \
|
||||
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
|
||||
(var) != NULL; \
|
||||
(var) = (((var) != (a_qr)) \
|
||||
? (var)->a_field.qre_prev : NULL))
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_QR_H */
|
||||
77
deps/jemalloc/include/jemalloc/internal/quantum.h
vendored
Normal file
77
deps/jemalloc/include/jemalloc/internal/quantum.h
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
#ifndef JEMALLOC_INTERNAL_QUANTUM_H
|
||||
#define JEMALLOC_INTERNAL_QUANTUM_H
|
||||
|
||||
/*
|
||||
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||
* classes).
|
||||
*/
|
||||
#ifndef LG_QUANTUM
|
||||
# if (defined(__i386__) || defined(_M_IX86))
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __ia64__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __alpha__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __arm__
|
||||
# define LG_QUANTUM 3
|
||||
# endif
|
||||
# ifdef __aarch64__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __hppa__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __m68k__
|
||||
# define LG_QUANTUM 3
|
||||
# endif
|
||||
# ifdef __mips__
|
||||
# define LG_QUANTUM 3
|
||||
# endif
|
||||
# ifdef __nios2__
|
||||
# define LG_QUANTUM 3
|
||||
# endif
|
||||
# ifdef __or1k__
|
||||
# define LG_QUANTUM 3
|
||||
# endif
|
||||
# ifdef __powerpc__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# if defined(__riscv) || defined(__riscv__)
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __s390__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
|
||||
defined(__SH4_SINGLE_ONLY__))
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __tile__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifdef __le32__
|
||||
# define LG_QUANTUM 4
|
||||
# endif
|
||||
# ifndef LG_QUANTUM
|
||||
# error "Unknown minimum alignment for architecture; specify via "
|
||||
"--with-lg-quantum"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
|
||||
#define QUANTUM_MASK (QUANTUM - 1)
|
||||
|
||||
/* Return the smallest quantum multiple that is >= a. */
|
||||
#define QUANTUM_CEILING(a) \
|
||||
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
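/*
 * Worked example (assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 * i.e. request sizes are rounded up to the next 16-byte boundary.
 */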
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_QUANTUM_H */
|
||||
67
deps/jemalloc/include/jemalloc/internal/quarantine.h
vendored
Normal file
67
deps/jemalloc/include/jemalloc/internal/quarantine.h
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct quarantine_obj_s quarantine_obj_t;
|
||||
typedef struct quarantine_s quarantine_t;
|
||||
|
||||
/* Default per thread quarantine size if valgrind is enabled. */
|
||||
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
struct quarantine_obj_s {
|
||||
void *ptr;
|
||||
size_t usize;
|
||||
};
|
||||
|
||||
struct quarantine_s {
|
||||
size_t curbytes;
|
||||
size_t curobjs;
|
||||
size_t first;
|
||||
#define LG_MAXOBJS_INIT 10
|
||||
size_t lg_maxobjs;
|
||||
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
|
||||
};
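/*
 * Indexing sketch (illustrative; the real logic lives in quarantine.c): objs
 * is a power-of-two ring, so the slot for the next enqueued object can be
 * computed by masking rather than by modulo.
 */
static inline size_t
quarantine_next_slot_sketch(const quarantine_t *quarantine)
{

	return ((quarantine->first + quarantine->curobjs) &
	    ((ZU(1) << quarantine->lg_maxobjs) - 1));
}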
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
quarantine_t *quarantine_init(size_t lg_maxobjs);
|
||||
void quarantine(void *ptr);
|
||||
void quarantine_cleanup(void *arg);
|
||||
bool quarantine_boot(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
|
||||
|
||||
void quarantine_alloc_hook(void);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
|
||||
malloc_tsd_externs(quarantine, quarantine_t *)
|
||||
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
|
||||
quarantine_cleanup)
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
quarantine_alloc_hook(void)
|
||||
{
|
||||
quarantine_t *quarantine;
|
||||
|
||||
assert(config_fill && opt_quarantine);
|
||||
|
||||
quarantine = *quarantine_tsd_get();
|
||||
if (quarantine == NULL)
|
||||
quarantine_init(LG_MAXOBJS_INIT);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
|
||||
1006
deps/jemalloc/include/jemalloc/internal/rb.h
vendored
Normal file
1006
deps/jemalloc/include/jemalloc/internal/rb.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
528
deps/jemalloc/include/jemalloc/internal/rtree.h
vendored
Normal file
528
deps/jemalloc/include/jemalloc/internal/rtree.h
vendored
Normal file
@@ -0,0 +1,528 @@
|
||||
#ifndef JEMALLOC_INTERNAL_RTREE_H
|
||||
#define JEMALLOC_INTERNAL_RTREE_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree_tsd.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/tsd.h"
|
||||
|
||||
/*
|
||||
* This radix tree implementation is tailored to the singular purpose of
|
||||
* associating metadata with extents that are currently owned by jemalloc.
|
||||
*
|
||||
*******************************************************************************
|
||||
*/
|
||||
|
||||
/* Number of high insignificant bits. */
|
||||
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
|
||||
/* Number of low insignificant bits. */
|
||||
#define RTREE_NLIB LG_PAGE
|
||||
/* Number of significant bits. */
|
||||
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
|
||||
/* Number of levels in radix tree. */
|
||||
#if RTREE_NSB <= 10
|
||||
# define RTREE_HEIGHT 1
|
||||
#elif RTREE_NSB <= 36
|
||||
# define RTREE_HEIGHT 2
|
||||
#elif RTREE_NSB <= 52
|
||||
# define RTREE_HEIGHT 3
|
||||
#else
|
||||
# error Unsupported number of significant virtual address bits
|
||||
#endif
|
||||
/* Use compact leaf representation if virtual address encoding allows. */
|
||||
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
|
||||
# define RTREE_LEAF_COMPACT
|
||||
#endif
|
||||
|
||||
/* Needed for initialization only. */
|
||||
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
|
||||
|
||||
typedef struct rtree_node_elm_s rtree_node_elm_t;
|
||||
struct rtree_node_elm_s {
|
||||
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
|
||||
};
|
||||
|
||||
struct rtree_leaf_elm_s {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
/*
|
||||
* Single pointer-width field containing all three leaf element fields.
|
||||
* For example, on a 64-bit x64 system with 48 significant virtual
|
||||
* memory address bits, the index, extent, and slab fields are packed as
|
||||
* such:
|
||||
*
|
||||
* x: index
|
||||
* e: extent
|
||||
* b: slab
|
||||
*
|
||||
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
|
||||
*/
|
||||
atomic_p_t le_bits;
|
||||
#else
|
||||
atomic_p_t le_extent; /* (extent_t *) */
|
||||
atomic_u_t le_szind; /* (szind_t) */
|
||||
atomic_b_t le_slab; /* (bool) */
|
||||
#endif
|
||||
};
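/*
 * Packing sketch (illustrative; mirrors the layout pictured above, while the
 * actual write-side helper appears further down in this header): szind
 * occupies the bits above LG_VADDR, the extent pointer the middle bits, and
 * the slab flag bit 0.
 */
static inline uintptr_t
rtree_leaf_elm_bits_pack_sketch(const extent_t *extent, szind_t szind,
    bool slab) {
	return (((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)extent & (((uintptr_t)1 << LG_VADDR) - 1)) |
	    (uintptr_t)slab);
}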
|
||||
|
||||
typedef struct rtree_level_s rtree_level_t;
|
||||
struct rtree_level_s {
|
||||
/* Number of key bits distinguished by this level. */
|
||||
unsigned bits;
|
||||
/*
|
||||
* Cumulative number of key bits distinguished by traversing to
|
||||
* corresponding tree level.
|
||||
*/
|
||||
unsigned cumbits;
|
||||
};
|
||||
|
||||
typedef struct rtree_s rtree_t;
|
||||
struct rtree_s {
|
||||
malloc_mutex_t init_lock;
|
||||
/* Number of elements based on rtree_levels[0].bits. */
|
||||
#if RTREE_HEIGHT > 1
|
||||
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#else
|
||||
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Split the bits into one to three partitions depending on number of
|
||||
 * significant bits.  If the number of bits does not divide evenly into the
|
||||
* number of levels, place one remainder bit per level starting at the leaf
|
||||
* level.
|
||||
*/
|
||||
static const rtree_level_t rtree_levels[] = {
|
||||
#if RTREE_HEIGHT == 1
|
||||
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 2
|
||||
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
|
||||
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
|
||||
#elif RTREE_HEIGHT == 3
|
||||
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3/2,
|
||||
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
|
||||
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
|
||||
#else
|
||||
# error Unsupported rtree height
|
||||
#endif
|
||||
};
|
||||
|
||||
bool rtree_new(rtree_t *rtree, bool zeroed);
|
||||
|
||||
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
|
||||
|
||||
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
|
||||
|
||||
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
|
||||
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
|
||||
|
||||
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
|
||||
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
|
||||
#ifdef JEMALLOC_JET
|
||||
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
|
||||
#endif
|
||||
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_leafkey(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
|
||||
return (key & mask);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
rtree_cache_direct_map(uintptr_t key) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
unsigned maskbits = ptrbits - cumbits;
|
||||
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||
rtree_subkey(uintptr_t key, unsigned level) {
|
||||
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||
unsigned cumbits = rtree_levels[level].cumbits;
|
||||
unsigned shiftbits = ptrbits - cumbits;
|
||||
unsigned maskbits = rtree_levels[level].bits;
|
||||
uintptr_t mask = (ZU(1) << maskbits) - 1;
|
||||
return ((key >> shiftbits) & mask);
|
||||
}
|
||||
|
||||
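/*
 * Editorial sketch (not part of jemalloc): the key-splitting math above, with
 * the constants hard-coded for one assumed configuration (64-bit pointers,
 * LG_VADDR == 48, 4 KiB pages, RTREE_HEIGHT == 2, so RTREE_NSB == 36 and each
 * level distinguishes 18 bits). Compile it standalone to see how a pointer
 * maps to a leaf (leafkey) and to a slot within that leaf (subkey).
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uintptr_t key = (uintptr_t)0x00007f1234567000ULL;
    unsigned ptrbits = 64, nhib = 64 - 48, leaf_bits = 18;
    unsigned cumbits = nhib + 2 * leaf_bits;    /* bits consumed at the leaf level */
    uintptr_t leafkey = key & ~(((uintptr_t)1 <<
        (ptrbits - (cumbits - leaf_bits))) - 1);        /* 0x7f1200000000 */
    uintptr_t subkey = (key >> (ptrbits - cumbits)) &
        (((uintptr_t)1 << leaf_bits) - 1);              /* 0x34567 */
    printf("leafkey=%#lx subkey=%#lx\n", (unsigned long)leafkey,
        (unsigned long)subkey);
    return 0;
}
#endif
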
/*
 * Atomic getters.
 *
 * dependent: Reading a value on behalf of a pointer to a valid allocation
 *            is guaranteed to be a clean read even without synchronization,
 *            because the rtree update became visible in memory before the
 *            pointer came into existence.
 * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
 *             dependent on a previous rtree write, which means a stale read
 *             could result if synchronization were omitted here.
 */
# ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
    return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
#  ifdef __aarch64__
    /*
     * aarch64 doesn't sign-extend the highest virtual address bit to set
     * the higher ones. Instead, the high bits get zeroed.
     */
    uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
    /* Mask off the slab bit. */
    uintptr_t low_bit_mask = ~(uintptr_t)1;
    uintptr_t mask = high_bit_mask & low_bit_mask;
    return (extent_t *)(bits & mask);
#  else
    /* Restore sign-extended high bits, mask slab bit. */
    return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
        RTREE_NHIB) & ~((uintptr_t)0x1));
#  endif
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
    return (szind_t)(bits >> LG_VADDR);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
    return (bool)(bits & (uintptr_t)0x1);
}

# endif

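/*
 * Editorial note (not part of jemalloc): in the RTREE_LEAF_COMPACT layout the
 * three fields share one word. Assuming LG_VADDR == 48, a leaf element whose
 * extent pointer is 0x7f1234560000, with szind 7 and the slab flag set, is
 * packed as
 *
 *   bits = ((uintptr_t)7 << 48) | 0x7f1234560000 | 0x1
 *        == 0x00077f1234560001,
 *
 * and the *_bits_*_get() helpers above recover each field by shifting and
 * masking (the extent pointer's bit 0 is free for the slab flag because
 * extents are at least word-aligned).
 */
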
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_extent_get(bits);
|
||||
#else
|
||||
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
|
||||
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||
return extent;
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_szind_get(bits);
|
||||
#else
|
||||
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
|
||||
: ATOMIC_ACQUIRE);
|
||||
#endif
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, bool dependent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
return rtree_leaf_elm_bits_slab_get(bits);
|
||||
#else
|
||||
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
|
||||
ATOMIC_ACQUIRE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, extent_t *extent) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
|
||||
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, szind_t szind) {
|
||||
assert(szind <= SC_NSIZES);
|
||||
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||
true);
|
||||
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||
((uintptr_t)slab);
|
||||
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||
#else
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
/*
|
||||
* Write extent last, since the element is atomically considered valid
|
||||
* as soon as the extent field is non-NULL.
|
||||
*/
|
||||
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
|
||||
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
|
||||
assert(!slab || szind < SC_NBINS);
|
||||
|
||||
/*
|
||||
* The caller implicitly assures that it is the only writer to the szind
|
||||
* and slab fields, and that the extent field cannot currently change.
|
||||
*/
|
||||
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
|
||||
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, bool init_missing) {
|
||||
assert(key != 0);
|
||||
assert(!dependent || !init_missing);
|
||||
|
||||
size_t slot = rtree_cache_direct_map(key);
|
||||
uintptr_t leafkey = rtree_leafkey(key);
|
||||
assert(leafkey != RTREE_LEAFKEY_INVALID);
|
||||
|
||||
/* Fast path: L1 direct mapped cache. */
|
||||
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
|
||||
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
|
||||
assert(leaf != NULL);
|
||||
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
|
||||
return &leaf[subkey];
|
||||
}
|
||||
/*
|
||||
* Search the L2 LRU cache. On hit, swap the matching element into the
|
||||
* slot in L1 cache, and move the position in L2 up by 1.
|
||||
*/
|
||||
#define RTREE_CACHE_CHECK_L2(i) do { \
|
||||
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
|
||||
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
|
||||
assert(leaf != NULL); \
|
||||
if (i > 0) { \
|
||||
/* Bubble up by one. */ \
|
||||
rtree_ctx->l2_cache[i].leafkey = \
|
||||
rtree_ctx->l2_cache[i - 1].leafkey; \
|
||||
rtree_ctx->l2_cache[i].leaf = \
|
||||
rtree_ctx->l2_cache[i - 1].leaf; \
|
||||
rtree_ctx->l2_cache[i - 1].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[i - 1].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
} else { \
|
||||
rtree_ctx->l2_cache[0].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[0].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
} \
|
||||
rtree_ctx->cache[slot].leafkey = leafkey; \
|
||||
rtree_ctx->cache[slot].leaf = leaf; \
|
||||
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
|
||||
return &leaf[subkey]; \
|
||||
} \
|
||||
} while (0)
|
||||
/* Check the first cache entry. */
|
||||
RTREE_CACHE_CHECK_L2(0);
|
||||
/* Search the remaining cache elements. */
|
||||
for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
|
||||
RTREE_CACHE_CHECK_L2(i);
|
||||
}
|
||||
#undef RTREE_CACHE_CHECK_L2
|
||||
|
||||
return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
|
||||
dependent, init_missing);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
|
||||
extent_t *extent, szind_t szind, bool slab) {
|
||||
/* Use rtree_clear() to set the extent to NULL. */
|
||||
assert(extent != NULL);
|
||||
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
|
||||
key, false, true);
|
||||
if (elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
|
||||
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
|
||||
bool dependent) {
|
||||
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
|
||||
key, dependent, false);
|
||||
if (!dependent && elm == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(elm != NULL);
|
||||
return elm;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
|
||||
dependent);
|
||||
if (!dependent && elm == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
|
||||
dependent);
|
||||
if (!dependent && elm == NULL) {
|
||||
return SC_NSIZES;
|
||||
}
|
||||
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
}
|
||||
|
||||
/*
|
||||
* rtree_slab_read() is intentionally omitted because slab is always read in
|
||||
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
|
||||
*/
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
|
||||
dependent);
|
||||
if (!dependent && elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to read szind_slab from the L1 cache. Returns true on a hit,
|
||||
* and fills in r_szind and r_slab. Otherwise returns false.
|
||||
*
|
||||
* Key is allowed to be NULL in order to save an extra branch on the
|
||||
* fastpath. returns false in this case.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, szind_t *r_szind, bool *r_slab) {
|
||||
rtree_leaf_elm_t *elm;
|
||||
|
||||
size_t slot = rtree_cache_direct_map(key);
|
||||
uintptr_t leafkey = rtree_leafkey(key);
|
||||
assert(leafkey != RTREE_LEAFKEY_INVALID);
|
||||
|
||||
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
|
||||
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
|
||||
assert(leaf != NULL);
|
||||
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
|
||||
elm = &leaf[subkey];
|
||||
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree,
|
||||
elm, true);
|
||||
*r_szind = rtree_leaf_elm_bits_szind_get(bits);
|
||||
*r_slab = rtree_leaf_elm_bits_slab_get(bits);
|
||||
#else
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
|
||||
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
|
||||
#endif
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
|
||||
dependent);
|
||||
if (!dependent && elm == NULL) {
|
||||
return true;
|
||||
}
|
||||
#ifdef RTREE_LEAF_COMPACT
|
||||
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||
*r_szind = rtree_leaf_elm_bits_szind_get(bits);
|
||||
*r_slab = rtree_leaf_elm_bits_slab_get(bits);
|
||||
#else
|
||||
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
|
||||
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, szind_t szind, bool slab) {
|
||||
assert(!slab || szind < SC_NBINS);
|
||||
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
|
||||
rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
|
||||
}
|
||||
|
||||
static inline void
|
||||
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key) {
|
||||
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
|
||||
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
|
||||
NULL);
|
||||
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_RTREE_H */
|
||||
50
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
vendored
Normal file
50
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H

/*
 * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively.
 * Each entry supports an entire leaf, so the cache hit rate is typically high
 * even with a small number of entries. In rare cases extent activity will
 * straddle the boundary between two leaf nodes. Furthermore, an arena may use
 * a combination of dss and mmap. Note that as memory usage grows past the
 * amount that this cache can directly cover, the cache will become less
 * effective if locality of reference is low, but the consequence is merely
 * cache misses while traversing the tree nodes.
 *
 * The L1 direct-mapped cache offers consistent, low cost on a cache hit, but
 * collisions can hurt the hit rate. This is resolved by combining it with an
 * L2 LRU cache, which requires a linear search and re-ordering on access but
 * suffers no collisions. Note that the caches themselves suffer CPU cache
 * misses if made overly large, plus the cost of the linear search in the LRU
 * cache.
 */
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8

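/*
 * Editorial sketch (not part of jemalloc): the shape of a lookup against the
 * two caches described above, mirroring rtree_leaf_elm_lookup() in rtree.h.
 * The direct-mapped L1 slot is checked first; on an L2 hit the entry is
 * promoted into L1 and bubbled up one position in L2. Types are simplified
 * (leaf pointers are void * here).
 */
#if 0
typedef struct { uintptr_t leafkey; void *leaf; } cache_elm_t;

static void *
cache_lookup(cache_elm_t *l1, size_t slot, cache_elm_t *l2, size_t l2_n,
    uintptr_t leafkey) {
    if (l1[slot].leafkey == leafkey) {
        return l1[slot].leaf;               /* L1 hit: O(1). */
    }
    for (size_t i = 0; i < l2_n; i++) {
        if (l2[i].leafkey != leafkey) {
            continue;
        }
        cache_elm_t hit = l2[i];
        /* Promote the hit into L1; demote the old L1 entry into L2. */
        l2[i] = (i > 0) ? l2[i - 1] : l1[slot];
        if (i > 0) {
            l2[i - 1] = l1[slot];
        }
        l1[slot] = hit;
        return hit.leaf;
    }
    return NULL;                            /* Miss: walk the tree instead. */
}
#endif
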
/*
 * Zero initializer required for tsd initialization only. Proper initialization
 * done via rtree_ctx_data_init().
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}


typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
    uintptr_t leafkey;
    rtree_leaf_elm_t *leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
    /* Direct mapped cache. */
    rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
    /* L2 LRU cache. */
    rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};

void rtree_ctx_data_init(rtree_ctx_t *ctx);

#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
26
deps/jemalloc/include/jemalloc/internal/safety_check.h
vendored
Normal file
26
deps/jemalloc/include/jemalloc/internal/safety_check.h
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H

void safety_check_fail(const char *format, ...);
/* Can set to NULL for a default. */
void safety_check_set_abort(void (*abort_fn)());

JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
    assert(usize < bumped_usize);
    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
        *((unsigned char *)ptr + i) = 0xBC;
    }
}

JEMALLOC_ALWAYS_INLINE void
safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
{
    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
        if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
            safety_check_fail("Use after free error\n");
        }
    }
}

#endif /* JEMALLOC_INTERNAL_SAFETY_CHECK_H */
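/*
 * Editorial sketch (not part of jemalloc): how the two helpers above pair up.
 * An allocation of usize bytes is "bumped" to a larger class; the slack is
 * painted with 0xBC at allocation time and re-checked on deallocation, so a
 * write past usize is reported via safety_check_fail(). The malloc/free calls
 * below are only a stand-in for the allocator's internal call sites.
 */
#if 0
void *ptr = malloc(64);                 /* pretend usize 48 was bumped to 64 */
safety_check_set_redzone(ptr, 48, 64);  /* paint bytes 48..63 with 0xBC */
/* ... application writes stay within the first 48 bytes ... */
safety_check_verify_redzone(ptr, 48, 64);   /* reports corruption, if any */
free(ptr);
#endif
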
333
deps/jemalloc/include/jemalloc/internal/sc.h
vendored
Normal file
333
deps/jemalloc/include/jemalloc/internal/sc.h
vendored
Normal file
@@ -0,0 +1,333 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SC_H
#define JEMALLOC_INTERNAL_SC_H

#include "jemalloc/internal/jemalloc_internal_types.h"

/*
 * Size class computations:
 *
 * These are a little tricky; we'll start by describing how things generally
 * work, and then describe some of the details.
 *
 * Ignore the first few size classes for a moment. We can then split all the
 * remaining size classes into groups. The size classes in a group are spaced
 * such that they cover allocation request sizes in a power-of-2 range. The
 * power of two is called the base of the group, and the size classes in it
 * satisfy allocations in the half-open range (base, base * 2]. There are
 * SC_NGROUP size classes in each group, equally spaced in the range, so that
 * each one covers allocations for base / SC_NGROUP possible allocation sizes.
 * We call that value (base / SC_NGROUP) the delta of the group. Each size class
 * is delta larger than the one before it (including the initial size class in a
 * group, which is delta larger than base, the largest size class in the
 * previous group).
 * To make the math all work out nicely, we require that SC_NGROUP is a power of
 * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
 * lg_base and lg_delta. For each of these groups then, we have that
 * lg_delta == lg_base - SC_LG_NGROUP.
 * The size classes in a group with a given lg_base and lg_delta (which, recall,
 * can be computed from lg_base for these groups) are therefore:
 *   base + 1 * delta
 *     which covers allocations in (base, base + 1 * delta]
 *   base + 2 * delta
 *     which covers allocations in (base + 1 * delta, base + 2 * delta].
 *   base + 3 * delta
 *     which covers allocations in (base + 2 * delta, base + 3 * delta].
 *   ...
 *   base + SC_NGROUP * delta ( == 2 * base)
 *     which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
 * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
 * practice.)
 * Note that the last size class in the group is the next power of two (after
 * base), so that we've set up the induction correctly for the next group's
 * selection of delta.
 *
 * Now, let's start considering the first few size classes. Two extra constants
 * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
 * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
 * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
 * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
 * highest required alignment of a platform. For allocation sizes smaller than
 * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
 * platforms with types with alignment larger than their size). To allow such
 * allocations (without wasting space unnecessarily), we introduce tiny size
 * classes; one per power of two, up until we hit the quantum size. There are
 * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
 *
 * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
 * start of a group in the sense we described above (covering a power of two
 * range) since, if we divided into it to pick a value of delta, we'd get a
 * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
 * is against the rules.
 *
 * The first base we can divide by SC_NGROUP while still being at least
 * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
 * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
 * classes are:
 *   1 * (1 << LG_QUANTUM)
 *   2 * (1 << LG_QUANTUM)
 *   3 * (1 << LG_QUANTUM)
 *   ... (although, as above, this "..." is empty in practice)
 *   SC_NGROUP * (1 << LG_QUANTUM).
 *
 * There are SC_NGROUP of these size classes, so we can regard it as a sort of
 * pseudo-group, even though it spans multiple powers of 2, is divided
 * differently, and both starts and ends on a power of 2 (as opposed to just
 * ending). SC_NGROUP is itself a power of two, so the first group after the
 * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
 * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
 * sizes without violating our LG_QUANTUM requirements, so we can safely set
 * lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
 *
 * So, in order, the size classes are:
 *
 * Tiny size classes:
 * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
 * - Sizes:
 *     1 << SC_LG_TINY_MIN
 *     1 << (SC_LG_TINY_MIN + 1)
 *     1 << (SC_LG_TINY_MIN + 2)
 *     ...
 *     1 << (LG_QUANTUM - 1)
 *
 * Initial pseudo-group:
 * - Count: SC_NGROUP
 * - Sizes:
 *     1 * (1 << LG_QUANTUM)
 *     2 * (1 << LG_QUANTUM)
 *     3 * (1 << LG_QUANTUM)
 *     ...
 *     SC_NGROUP * (1 << LG_QUANTUM)
 *
 * Regular group 0:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * Regular group 1:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * ...
 *
 * Regular group N:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 *
 * Representation of metadata:
 * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
 * per-size-class basis, and maintain the invariant that, across all size
 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
 *
 * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
 *
 * For the initial tiny size classes (if any), lg_base is lg(size class size).
 * lg_delta is lg_base for the first size class, and lg_base - 1 for all
 * subsequent ones. ndelta is always 0.
 *
 * For the pseudo-group, if there are no tiny size classes, then we set
 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
 * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
 * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
 * indeed get a power of two that way). If there *are* tiny size classes, then
 * the first size class needs to have lg_delta relative to the largest tiny size
 * class. We therefore set lg_base == LG_QUANTUM - 1,
 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
 * pseudo-group the same.
 *
 *
 * Other terminology:
 * "Small" size classes mean those that are allocated out of bins, which is the
 * same as those that are slab allocated.
 * "Large" size classes are those that are not small. The cutoff for counting as
 * large is page size * group size.
 */

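/*
 * Editorial sketch (not part of jemalloc): the scheme above, made concrete for
 * a typical 64-bit configuration with LG_QUANTUM == 4 (16-byte quantum),
 * SC_LG_NGROUP == 2, and SC_LG_TINY_MIN == 3. Compile standalone; it prints
 * the first size classes by applying the invariant
 * size == (1 << lg_base) + ndelta * (1 << lg_delta).
 */
#if 0
#include <stddef.h>
#include <stdio.h>

int
main(void) {
    int lg_quantum = 4, lg_ngroup = 2, lg_tiny_min = 3;
    /* Tiny classes: 8. */
    for (int lg = lg_tiny_min; lg < lg_quantum; lg++) {
        printf("%zu ", (size_t)1 << lg);
    }
    /* Pseudo-group: 16, 32, 48, 64. */
    for (int n = 1; n <= (1 << lg_ngroup); n++) {
        printf("%zu ", (size_t)n << lg_quantum);
    }
    /* First three regular groups: 80..128, 160..256, 320..512. */
    for (int lg_base = lg_quantum + lg_ngroup;
        lg_base < lg_quantum + lg_ngroup + 3; lg_base++) {
        int lg_delta = lg_base - lg_ngroup;
        for (int ndelta = 1; ndelta <= (1 << lg_ngroup); ndelta++) {
            printf("%zu ", ((size_t)1 << lg_base) +
                ((size_t)ndelta << lg_delta));
        }
    }
    printf("\n");   /* 8 16 32 48 64 80 96 112 128 160 192 224 256 ... */
    return 0;
}
#endif
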
/*
 * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
 */
#define SC_LG_NGROUP 2
#define SC_LG_TINY_MIN 3

#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#error "Unsupported LG_TINY_MIN"
#endif

/*
 * The definitions below are all determined by the above settings and system
 * characteristics.
 */
#define SC_NGROUP (1ULL << SC_LG_NGROUP)
#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
#define SC_NPSEUDO SC_NGROUP
#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
/*
 * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
 * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
 * size class shorter than the others).
 * We could probably save some space in arenas by capping this at LG_VADDR size.
 */
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
    (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)

/* The number of size classes that are a multiple of the page size. */
#define SC_NPSIZES ( \
    /* Start with all the size classes. */ \
    SC_NSIZES \
    /* Subtract out those groups with too small a base. */ \
    - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
    /* And the pseudo-group. */ \
    - SC_NPSEUDO \
    /* And the tiny group. */ \
    - SC_NTINY \
    /* Sizes where ndelta*delta is not a multiple of the page size. */ \
    - (SC_LG_NGROUP * SC_NGROUP))
/*
 * Note that the last line is computed as the sum of the second column in the
 * following table:
 * lg(base)                      | count of sizes to exclude
 * ------------------------------|-----------------------------
 * LG_PAGE - 1                   | SC_NGROUP - 1
 * LG_PAGE                       | SC_NGROUP - 1
 * LG_PAGE + 1                   | SC_NGROUP - 2
 * LG_PAGE + 2                   | SC_NGROUP - 4
 * ...                           | ...
 * LG_PAGE + (SC_LG_NGROUP - 1)  | SC_NGROUP - (SC_NGROUP / 2)
 */

/*
 * We declare a size class binnable if size < page size * group. Or, in other
 * words, if lg(size) < lg(page size) + lg(group size).
 */
#define SC_NBINS ( \
    /* Sub-regular size classes. */ \
    SC_NTINY + SC_NPSEUDO \
    /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
    + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
    /* Last SC of the last group hits the bound exactly; exclude it. */ \
    - 1)

/*
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.
 */
#if (SC_NBINS > 256)
# error "Too many small size classes"
#endif

/* The largest size class in the lookup table. */
#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)

/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))

/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)

/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)

/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))

/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)

typedef struct sc_s sc_t;
struct sc_s {
    /* Size class index, or -1 if not a valid size class. */
    int index;
    /* Lg group base size (no deltas added). */
    int lg_base;
    /* Lg delta to previous size class. */
    int lg_delta;
    /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
    int ndelta;
    /*
     * True if the size class is a multiple of the page size, false
     * otherwise.
     */
    bool psz;
    /* True if the size class is a small (bin) size class, false otherwise. */
    bool bin;
    /* The slab page count if a small bin size class, 0 otherwise. */
    int pgs;
    /* Same as lg_delta if a lookup table size class, 0 otherwise. */
    int lg_delta_lookup;
};

typedef struct sc_data_s sc_data_t;
struct sc_data_s {
    /* Number of tiny size classes. */
    unsigned ntiny;
    /* Number of bins supported by the lookup table. */
    int nlbins;
    /* Number of small size class bins. */
    int nbins;
    /* Number of size classes. */
    int nsizes;
    /* Number of bits required to store NSIZES. */
    int lg_ceil_nsizes;
    /* Number of size classes that are a multiple of (1U << LG_PAGE). */
    unsigned npsizes;
    /* Lg of maximum tiny size class (or -1, if none). */
    int lg_tiny_maxclass;
    /* Maximum size class included in lookup table. */
    size_t lookup_maxclass;
    /* Maximum small size class. */
    size_t small_maxclass;
    /* Lg of minimum large size class. */
    int lg_large_minclass;
    /* The minimum large size class. */
    size_t large_minclass;
    /* Maximum (large) size class. */
    size_t large_maxclass;
    /* True if the sc_data_t has been initialized (for debugging only). */
    bool initialized;

    sc_t sc[SC_NSIZES];
};

void sc_data_init(sc_data_t *data);
/*
 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
 */
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
void sc_boot(sc_data_t *data);

#endif /* JEMALLOC_INTERNAL_SC_H */
55
deps/jemalloc/include/jemalloc/internal/seq.h
vendored
Normal file
55
deps/jemalloc/include/jemalloc/internal/seq.h
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SEQ_H
#define JEMALLOC_INTERNAL_SEQ_H

#include "jemalloc/internal/atomic.h"

/*
 * A simple seqlock implementation.
 */

#define seq_define(type, short_type) \
typedef struct { \
    atomic_zu_t seq; \
    atomic_zu_t data[ \
        (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
} seq_##short_type##_t; \
 \
/* \
 * No internal synchronization -- the caller must ensure that there's \
 * only a single writer at a time. \
 */ \
static inline void \
seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
    size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
    buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
    memcpy(buf, src, sizeof(type)); \
    size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
    atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
    atomic_fence(ATOMIC_RELEASE); \
    for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
        atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
    } \
    atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
} \
 \
/* Returns whether or not the read was consistent. */ \
static inline bool \
seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
    size_t buf[sizeof(src->data) / sizeof(size_t)]; \
    size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
    if (seq1 % 2 != 0) { \
        return false; \
    } \
    for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
        buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
    } \
    atomic_fence(ATOMIC_ACQUIRE); \
    size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
    if (seq1 != seq2) { \
        return false; \
    } \
    memcpy(dst, buf, sizeof(type)); \
    return true; \
}

#endif /* JEMALLOC_INTERNAL_SEQ_H */
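/*
 * Editorial sketch (not part of jemalloc): instantiating the seqlock above for
 * a hypothetical stats struct. seq_define() generates seq_stats_t together
 * with seq_store_stats() (single writer) and seq_try_load_stats() (readers
 * retry until they observe a consistent, even sequence number).
 */
#if 0
#include <stdint.h>

typedef struct {
    uint64_t nmalloc;
    uint64_t ndalloc;
} stats_t;

seq_define(stats_t, stats)

static void
read_snapshot(seq_stats_t *shared, stats_t *snap) {
    while (!seq_try_load_stats(snap, shared)) {
        /* Raced with the writer; retry. */
    }
    /* *snap is now a consistent copy. */
}
#endif
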
1428
deps/jemalloc/include/jemalloc/internal/size_classes.h
vendored
Normal file
1428
deps/jemalloc/include/jemalloc/internal/size_classes.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
232
deps/jemalloc/include/jemalloc/internal/smoothstep.h
vendored
Normal file
232
deps/jemalloc/include/jemalloc/internal/smoothstep.h
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H

/*
 * This file was generated by the following command:
 *   sh smoothstep.sh smoother 200 24 3 15
 */
/******************************************************************************/

/*
 * This header defines a precomputed table based on the smoothstep family of
 * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
 * that floating point math can be avoided.
 *
 *   smoothstep(x)     = -2x^3  +  3x^2
 *   smootherstep(x)   =  6x^5  - 15x^4 + 10x^3
 *   smootheststep(x)  = -20x^7 + 70x^6 - 84x^5 + 35x^4
 */

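/*
 * Editorial check (not part of the generated file): each h value below is y
 * scaled by 2^SMOOTHSTEP_BFP and truncated. For example, at x = 0.5,
 *
 *   smootherstep(0.5) = 6*(0.5)^5 - 15*(0.5)^4 + 10*(0.5)^3
 *                     = 0.1875 - 0.9375 + 1.25 = 0.5,
 *
 * and 0.5 * 2^24 = 0x0000000000800000, which is exactly the h column of
 * STEP(100, ...) below.
 */
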
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
    /* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
|
||||
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
|
||||
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
|
||||
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
|
||||
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
|
||||
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
|
||||
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
|
||||
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
|
||||
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
|
||||
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
|
||||
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
|
||||
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
|
||||
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
|
||||
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
|
||||
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
|
||||
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
|
||||
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
|
||||
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
|
||||
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
|
||||
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
|
||||
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
|
||||
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
|
||||
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
|
||||
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
|
||||
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
|
||||
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
|
||||
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
|
||||
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
|
||||
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
|
||||
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
|
||||
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
|
||||
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
|
||||
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
|
||||
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
|
||||
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
|
||||
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
|
||||
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
|
||||
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
|
||||
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
|
||||
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
|
||||
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
|
||||
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
|
||||
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
|
||||
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
|
||||
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
|
||||
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
|
||||
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
|
||||
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
|
||||
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
|
||||
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
|
||||
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
|
||||
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
|
||||
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
|
||||
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
|
||||
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
|
||||
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
|
||||
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
|
||||
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
|
||||
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
|
||||
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
|
||||
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
|
||||
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
|
||||
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
|
||||
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
|
||||
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
|
||||
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
|
||||
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
|
||||
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
|
||||
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
|
||||
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
|
||||
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
|
||||
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
|
||||
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
|
||||
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
|
||||
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
|
||||
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
|
||||
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
|
||||
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
|
||||
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
|
||||
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
|
||||
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
|
||||
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
|
||||
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
|
||||
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
|
||||
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
|
||||
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
|
||||
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
|
||||
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
|
||||
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
|
||||
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
|
||||
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
|
||||
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
|
||||
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
|
||||
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
|
||||
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
|
||||
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
|
||||
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
|
||||
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
|
||||
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
|
||||
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
|
||||
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
|
||||
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
|
||||
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
|
||||
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
|
||||
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
|
||||
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
|
||||
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
|
||||
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
|
||||
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
|
||||
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
|
||||
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
|
||||
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
|
||||
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
|
||||
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
|
||||
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
|
||||
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
|
||||
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
|
||||
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
|
||||
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
|
||||
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
|
||||
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
|
||||
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
|
||||
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
|
||||
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
|
||||
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
|
||||
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
|
||||
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
|
||||
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
|
||||
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
|
||||
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
|
||||
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
|
||||
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
|
||||
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
|
||||
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
|
||||
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
|
||||
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
|
||||
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
|
||||
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
|
||||
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
|
||||
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
|
||||
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
|
||||
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
|
||||
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
|
||||
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
|
||||
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
|
||||
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
|
||||
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
|
||||
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
|
||||
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
|
||||
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
|
||||
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
|
||||
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
|
||||
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
|
||||
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
|
||||
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
|
||||
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
|
||||
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
|
||||
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
|
||||
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
|
||||
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
|
||||
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
|
||||
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
|
||||
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
|
||||
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
|
||||
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
|
||||
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
|
||||
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
|
||||
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
|
||||
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
|
||||
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
|
||||
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
|
||||
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
|
||||
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
|
||||
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
|
||||
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
|
||||
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
|
||||
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
|
||||
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
|
||||
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
|
||||
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
|
||||
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
|
||||
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
|
||||
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
|
||||
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
|
||||
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
|
||||
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
|
||||
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
|
||||
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
|
||||
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
|
||||
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
|
||||
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
|
||||
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
|
||||
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
|
||||
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
|
||||
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
|
||||
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
|
||||
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
|
||||
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
|
||||
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
|
||||
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
|
||||
40
deps/jemalloc/include/jemalloc/internal/spin.h
vendored
Normal file
40
deps/jemalloc/include/jemalloc/internal/spin.h
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H

#define SPIN_INITIALIZER {0U}

typedef struct {
    unsigned iteration;
} spin_t;

static inline void
spin_cpu_spinwait() {
# if HAVE_CPU_SPINWAIT
    CPU_SPINWAIT;
# else
    volatile int x = 0;
    x = x;
# endif
}

static inline void
spin_adaptive(spin_t *spin) {
    volatile uint32_t i;

    if (spin->iteration < 5) {
        for (i = 0; i < (1U << spin->iteration); i++) {
            spin_cpu_spinwait();
        }
        spin->iteration++;
    } else {
#ifdef _WIN32
        SwitchToThread();
#else
        sched_yield();
#endif
    }
}

#undef SPIN_INLINE

#endif /* JEMALLOC_INTERNAL_SPIN_H */
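/*
 * Editorial sketch (not part of jemalloc): typical use of the adaptive spin
 * above is backoff while polling a condition another thread will satisfy.
 * spin_adaptive() escalates from short busy-waits (doubling each round) to
 * yielding the CPU after five rounds.
 */
#if 0
static void
wait_for_flag(volatile int *flag) {
    spin_t spinner = SPIN_INITIALIZER;
    while (!*flag) {
        spin_adaptive(&spinner);
    }
}
#endif
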
31
deps/jemalloc/include/jemalloc/internal/stats.h
vendored
Normal file
31
deps/jemalloc/include/jemalloc/internal/stats.h
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H

/*  OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
    OPTION('J', json, false, true) \
    OPTION('g', general, true, false) \
    OPTION('m', merged, config_stats, false) \
    OPTION('d', destroyed, config_stats, false) \
    OPTION('a', unmerged, config_stats, false) \
    OPTION('b', bins, true, false) \
    OPTION('l', large, true, false) \
    OPTION('x', mutex, true, false) \
    OPTION('e', extents, true, false)

enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
    STATS_PRINT_OPTIONS
#undef OPTION
    stats_print_tot_num_options
};

/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];

/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_INTERNAL_STATS_H */
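/*
 * Editorial note (not part of jemalloc): with the OPTION() X-macro above, the
 * enum expands to one constant per option character plus a terminator,
 * roughly as follows, so opt_stats_print_opts has one slot per option plus a
 * trailing NUL.
 */
#if 0
enum {
    stats_print_option_num_json,
    stats_print_option_num_general,
    stats_print_option_num_merged,
    stats_print_option_num_destroyed,
    stats_print_option_num_unmerged,
    stats_print_option_num_bins,
    stats_print_option_num_large,
    stats_print_option_num_mutex,
    stats_print_option_num_extents,
    stats_print_tot_num_options    /* == 9 */
};
#endif
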
12
deps/jemalloc/include/jemalloc/internal/stats_tsd.h
vendored
Normal file
12
deps/jemalloc/include/jemalloc/internal/stats_tsd.h
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H

typedef struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
} tcache_bin_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */
318
deps/jemalloc/include/jemalloc/internal/sz.h
vendored
Normal file
318
deps/jemalloc/include/jemalloc/internal/sz.h
vendored
Normal file
@@ -0,0 +1,318 @@
|
||||
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H

#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"

/*
 * sz module: Size computations.
 *
 * Some abbreviations used here:
 *   p: Page
 *   ind: Index
 *   s, sz: Size
 *   u: Usable size
 *   a: Aligned
 *
 * These are not always used completely consistently, but should be enough to
 * interpret function names. E.g. sz_psz2ind converts page size to page size
 * index; sz_sa2u converts a (size, alignment) allocation request to the usable
 * size that would result from such an allocation.
 */

/*
 * sz_pind2sz_tab encodes the same information as could be computed by
 * sz_pind2sz_compute().
 */
extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
/*
 * sz_index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by sz_index2size_compute().
 */
extern size_t sz_index2size_tab[SC_NSIZES];
/*
 * sz_size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes. In order to reduce cache footprint, the table is compressed,
 * and all accesses are via sz_size2index().
 */
extern uint8_t sz_size2index_tab[];

static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

extern void sz_boot(const sc_data_t *sc_data);

JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
    if (unlikely(psz > SC_LARGE_MAXCLASS)) {
        return SC_NPSIZES;
    }
    pszind_t x = lg_floor((psz<<1)-1);
    pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
        0 : x - (SC_LG_NGROUP + LG_PAGE);
    pszind_t grp = shift << SC_LG_NGROUP;

    pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
        LG_PAGE : x - SC_LG_NGROUP - 1;

    size_t delta_inverse_mask = ZU(-1) << lg_delta;
    pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
        ((ZU(1) << SC_LG_NGROUP) - 1);

    pszind_t ind = grp + mod;
    return ind;
}

static inline size_t
|
||||
sz_pind2sz_compute(pszind_t pind) {
|
||||
if (unlikely(pind == SC_NPSIZES)) {
|
||||
return SC_LARGE_MAXCLASS + PAGE;
|
||||
}
|
||||
size_t grp = pind >> SC_LG_NGROUP;
|
||||
size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
|
||||
|
||||
size_t grp_size_mask = ~((!!grp)-1);
|
||||
size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
|
||||
& grp_size_mask;
|
||||
|
||||
size_t shift = (grp == 0) ? 1 : grp;
|
||||
size_t lg_delta = shift + (LG_PAGE-1);
|
||||
size_t mod_size = (mod+1) << lg_delta;
|
||||
|
||||
size_t sz = grp_size + mod_size;
|
||||
return sz;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
sz_pind2sz_lookup(pszind_t pind) {
|
||||
size_t ret = (size_t)sz_pind2sz_tab[pind];
|
||||
assert(ret == sz_pind2sz_compute(pind));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
sz_pind2sz(pszind_t pind) {
|
||||
assert(pind < SC_NPSIZES + 1);
|
||||
return sz_pind2sz_lookup(pind);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
sz_psz2u(size_t psz) {
|
||||
if (unlikely(psz > SC_LARGE_MAXCLASS)) {
|
||||
return SC_LARGE_MAXCLASS + PAGE;
|
||||
}
|
||||
size_t x = lg_floor((psz<<1)-1);
|
||||
size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
|
||||
LG_PAGE : x - SC_LG_NGROUP - 1;
|
||||
size_t delta = ZU(1) << lg_delta;
|
||||
size_t delta_mask = delta - 1;
|
||||
size_t usize = (psz + delta_mask) & ~delta_mask;
|
||||
return usize;
|
||||
}
|
||||
|
||||
static inline szind_t
|
||||
sz_size2index_compute(size_t size) {
|
||||
if (unlikely(size > SC_LARGE_MAXCLASS)) {
|
||||
return SC_NSIZES;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
return 0;
|
||||
}
|
||||
#if (SC_NTINY != 0)
|
||||
if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
|
||||
szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
|
||||
szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
|
||||
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
|
||||
}
|
||||
#endif
|
||||
{
|
||||
szind_t x = lg_floor((size<<1)-1);
|
||||
szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
|
||||
x - (SC_LG_NGROUP + LG_QUANTUM);
|
||||
szind_t grp = shift << SC_LG_NGROUP;
|
||||
|
||||
szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
|
||||
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
|
||||
|
||||
size_t delta_inverse_mask = ZU(-1) << lg_delta;
|
||||
szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
|
||||
((ZU(1) << SC_LG_NGROUP) - 1);
|
||||
|
||||
szind_t index = SC_NTINY + grp + mod;
|
||||
return index;
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
sz_size2index_lookup(size_t size) {
|
||||
assert(size <= SC_LOOKUP_MAXCLASS);
|
||||
szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
|
||||
>> SC_LG_TINY_MIN]);
|
||||
assert(ret == sz_size2index_compute(size));
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE szind_t
|
||||
sz_size2index(size_t size) {
|
||||
if (likely(size <= SC_LOOKUP_MAXCLASS)) {
|
||||
return sz_size2index_lookup(size);
|
||||
}
|
||||
return sz_size2index_compute(size);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
sz_index2size_compute(szind_t index) {
|
||||
#if (SC_NTINY > 0)
|
||||
if (index < SC_NTINY) {
|
||||
return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
|
||||
}
|
||||
#endif
|
||||
{
|
||||
size_t reduced_index = index - SC_NTINY;
|
||||
size_t grp = reduced_index >> SC_LG_NGROUP;
|
||||
size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
|
||||
1);
|
||||
|
||||
size_t grp_size_mask = ~((!!grp)-1);
|
||||
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
|
||||
(SC_LG_NGROUP-1))) << grp) & grp_size_mask;
|
||||
|
||||
size_t shift = (grp == 0) ? 1 : grp;
|
||||
size_t lg_delta = shift + (LG_QUANTUM-1);
|
||||
size_t mod_size = (mod+1) << lg_delta;
|
||||
|
||||
size_t usize = grp_size + mod_size;
|
||||
return usize;
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_index2size_lookup(szind_t index) {
|
||||
size_t ret = (size_t)sz_index2size_tab[index];
|
||||
assert(ret == sz_index2size_compute(index));
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_index2size(szind_t index) {
|
||||
assert(index < SC_NSIZES);
|
||||
return sz_index2size_lookup(index);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_s2u_compute(size_t size) {
|
||||
if (unlikely(size > SC_LARGE_MAXCLASS)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
size++;
|
||||
}
|
||||
#if (SC_NTINY > 0)
|
||||
if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
|
||||
size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
|
||||
size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
|
||||
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
|
||||
(ZU(1) << lg_ceil));
|
||||
}
|
||||
#endif
|
||||
{
|
||||
size_t x = lg_floor((size<<1)-1);
|
||||
size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
|
||||
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
|
||||
size_t delta = ZU(1) << lg_delta;
|
||||
size_t delta_mask = delta - 1;
|
||||
size_t usize = (size + delta_mask) & ~delta_mask;
|
||||
return usize;
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_s2u_lookup(size_t size) {
|
||||
size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
|
||||
|
||||
assert(ret == sz_s2u_compute(size));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute usable size that would result from allocating an object with the
|
||||
* specified size.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_s2u(size_t size) {
|
||||
if (likely(size <= SC_LOOKUP_MAXCLASS)) {
|
||||
return sz_s2u_lookup(size);
|
||||
}
|
||||
return sz_s2u_compute(size);
|
||||
}
|
||||
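
The group/delta arithmetic in sz_s2u_compute() above is easier to see in isolation. The following standalone sketch reproduces just the rounding step for ordinary (non-tiny, non-overflowing) requests, assuming the common configuration LG_QUANTUM == 4 and SC_LG_NGROUP == 2; it illustrates the technique and is not part of the vendored header.

#include <stddef.h>
#include <stdio.h>

#define LG_QUANTUM   4
#define SC_LG_NGROUP 2

/* Round a request up to its size class, mirroring the general branch of
 * sz_s2u_compute(): each size group spans one power of two and is split into
 * 1 << SC_LG_NGROUP classes, so the spacing (delta) doubles every group. */
static size_t
s2u_sketch(size_t size) {
	if (size <= 1) {
		return (size_t)1 << LG_QUANTUM;	/* avoid clz(0); tiny classes not modeled */
	}
	/* x == ceil(log2(size)), i.e. lg_floor((size << 1) - 1). */
	unsigned x = 64 - (unsigned)__builtin_clzll((unsigned long long)size - 1);
	unsigned lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
	    ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;
	return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
	size_t reqs[] = {1, 17, 100, 200, 5000};
	for (unsigned i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		printf("%zu -> %zu\n", reqs[i], s2u_sketch(reqs[i]));	/* 16, 32, 112, 224, 5120 */
	}
	return 0;
}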
|
||||
/*
|
||||
* Compute usable size that would result from allocating an object with the
|
||||
* specified size and alignment.
|
||||
*/
|
||||
JEMALLOC_ALWAYS_INLINE size_t
|
||||
sz_sa2u(size_t size, size_t alignment) {
|
||||
size_t usize;
|
||||
|
||||
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
|
||||
|
||||
/* Try for a small size class. */
|
||||
if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
|
||||
/*
|
||||
* Round size up to the nearest multiple of alignment.
|
||||
*
|
||||
* This done, we can take advantage of the fact that for each
|
||||
* small size class, every object is aligned at the smallest
|
||||
* power of two that is non-zero in the base two representation
|
||||
* of the size. For example:
|
||||
*
|
||||
* Size | Base 2 | Minimum alignment
|
||||
* -----+----------+------------------
|
||||
* 96 | 1100000 | 32
|
||||
* 144 | 10100000 | 32
|
||||
* 192 | 11000000 | 64
|
||||
*/
|
||||
usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
|
||||
if (usize < SC_LARGE_MINCLASS) {
|
||||
return usize;
|
||||
}
|
||||
}
|
||||
|
||||
/* Large size class. Beware of overflow. */
|
||||
|
||||
if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Make sure result is a large size class. */
|
||||
if (size <= SC_LARGE_MINCLASS) {
|
||||
usize = SC_LARGE_MINCLASS;
|
||||
} else {
|
||||
usize = sz_s2u(size);
|
||||
if (usize < size) {
|
||||
/* size_t overflow. */
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the multi-page mapping that large_palloc() would need in
|
||||
* order to guarantee the alignment.
|
||||
*/
|
||||
if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
|
||||
/* size_t overflow. */
|
||||
return 0;
|
||||
}
|
||||
return usize;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_SIZE_H */
|
||||
443
deps/jemalloc/include/jemalloc/internal/tcache.h
vendored
Normal file
@@ -0,0 +1,443 @@
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_TYPES
|
||||
|
||||
typedef struct tcache_bin_info_s tcache_bin_info_t;
|
||||
typedef struct tcache_bin_s tcache_bin_t;
|
||||
typedef struct tcache_s tcache_t;
|
||||
|
||||
/*
|
||||
* tcache pointers close to NULL are used to encode state information that is
|
||||
* used for two purposes: preventing thread caching on a per thread basis and
|
||||
* cleaning up during thread shutdown.
|
||||
*/
|
||||
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
|
||||
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
|
||||
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
|
||||
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
|
||||
|
||||
/*
|
||||
* Absolute maximum number of cache slots for each small bin in the thread
|
||||
* cache. This is an additional constraint beyond that imposed as: twice the
|
||||
* number of regions per run for this size class.
|
||||
*
|
||||
* This constant must be an even number.
|
||||
*/
|
||||
#define TCACHE_NSLOTS_SMALL_MAX 200
|
||||
|
||||
/* Number of cache slots for large size classes. */
|
||||
#define TCACHE_NSLOTS_LARGE 20
|
||||
|
||||
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
|
||||
#define LG_TCACHE_MAXCLASS_DEFAULT 15
|
||||
|
||||
/*
|
||||
* TCACHE_GC_SWEEP is the approximate number of allocation events between
|
||||
* full GC sweeps. Integer rounding may cause the actual number to be
|
||||
* slightly higher, since GC is performed incrementally.
|
||||
*/
|
||||
#define TCACHE_GC_SWEEP 8192
|
||||
|
||||
/* Number of tcache allocation/deallocation events between incremental GCs. */
|
||||
#define TCACHE_GC_INCR \
|
||||
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
|
||||
|
||||
#endif /* JEMALLOC_H_TYPES */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_STRUCTS
|
||||
|
||||
typedef enum {
|
||||
tcache_enabled_false = 0, /* Enable cast to/from bool. */
|
||||
tcache_enabled_true = 1,
|
||||
tcache_enabled_default = 2
|
||||
} tcache_enabled_t;
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of tcache_t's tbins array
|
||||
* is stored separately, mainly to reduce memory usage.
|
||||
*/
|
||||
struct tcache_bin_info_s {
|
||||
unsigned ncached_max; /* Upper limit on ncached. */
|
||||
};
|
||||
|
||||
struct tcache_bin_s {
|
||||
tcache_bin_stats_t tstats;
|
||||
int low_water; /* Min # cached since last GC. */
|
||||
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
|
||||
unsigned ncached; /* # of cached objects. */
|
||||
void **avail; /* Stack of available objects. */
|
||||
};
|
||||
|
||||
struct tcache_s {
|
||||
ql_elm(tcache_t) link; /* Used for aggregating stats. */
|
||||
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
|
||||
arena_t *arena; /* This thread's arena. */
|
||||
unsigned ev_cnt; /* Event count since incremental GC. */
|
||||
unsigned next_gc_bin; /* Next bin to GC. */
|
||||
tcache_bin_t tbins[1]; /* Dynamically sized. */
|
||||
/*
|
||||
* The pointer stacks associated with tbins follow as a contiguous
|
||||
* array. During tcache initialization, the avail pointer in each
|
||||
* element of tbins is initialized to point to the proper offset within
|
||||
* this array.
|
||||
*/
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
extern bool opt_tcache;
|
||||
extern ssize_t opt_lg_tcache_max;
|
||||
|
||||
extern tcache_bin_info_t *tcache_bin_info;
|
||||
|
||||
/*
|
||||
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
|
||||
* large-object bins.
|
||||
*/
|
||||
extern size_t nhbins;
|
||||
|
||||
/* Maximum cached size class. */
|
||||
extern size_t tcache_maxclass;
|
||||
|
||||
size_t tcache_salloc(const void *ptr);
|
||||
void tcache_event_hard(tcache_t *tcache);
|
||||
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
|
||||
size_t binind);
|
||||
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache);
|
||||
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache);
|
||||
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
|
||||
void tcache_arena_dissociate(tcache_t *tcache);
|
||||
tcache_t *tcache_create(arena_t *arena);
|
||||
void tcache_destroy(tcache_t *tcache);
|
||||
void tcache_thread_cleanup(void *arg);
|
||||
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
|
||||
bool tcache_boot0(void);
|
||||
bool tcache_boot1(void);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_INLINES
|
||||
|
||||
#ifndef JEMALLOC_ENABLE_INLINE
|
||||
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
|
||||
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
|
||||
|
||||
void tcache_event(tcache_t *tcache);
|
||||
void tcache_flush(void);
|
||||
bool tcache_enabled_get(void);
|
||||
tcache_t *tcache_get(bool create);
|
||||
void tcache_enabled_set(bool enabled);
|
||||
void *tcache_alloc_easy(tcache_bin_t *tbin);
|
||||
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
|
||||
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
|
||||
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
|
||||
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
|
||||
#endif
|
||||
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
|
||||
/* Map of thread-specific caches. */
|
||||
malloc_tsd_externs(tcache, tcache_t *)
|
||||
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
|
||||
tcache_thread_cleanup)
|
||||
/* Per thread flag that allows thread caches to be disabled. */
|
||||
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
|
||||
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
|
||||
tcache_enabled_default, malloc_tsd_no_cleanup)
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
tcache_flush(void)
|
||||
{
|
||||
tcache_t *tcache;
|
||||
|
||||
cassert(config_tcache);
|
||||
|
||||
tcache = *tcache_tsd_get();
|
||||
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
|
||||
return;
|
||||
tcache_destroy(tcache);
|
||||
tcache = NULL;
|
||||
tcache_tsd_set(&tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
tcache_enabled_get(void)
|
||||
{
|
||||
tcache_enabled_t tcache_enabled;
|
||||
|
||||
cassert(config_tcache);
|
||||
|
||||
tcache_enabled = *tcache_enabled_tsd_get();
|
||||
if (tcache_enabled == tcache_enabled_default) {
|
||||
tcache_enabled = (tcache_enabled_t)opt_tcache;
|
||||
tcache_enabled_tsd_set(&tcache_enabled);
|
||||
}
|
||||
|
||||
return ((bool)tcache_enabled);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
tcache_enabled_set(bool enabled)
|
||||
{
|
||||
tcache_enabled_t tcache_enabled;
|
||||
tcache_t *tcache;
|
||||
|
||||
cassert(config_tcache);
|
||||
|
||||
tcache_enabled = (tcache_enabled_t)enabled;
|
||||
tcache_enabled_tsd_set(&tcache_enabled);
|
||||
tcache = *tcache_tsd_get();
|
||||
if (enabled) {
|
||||
if (tcache == TCACHE_STATE_DISABLED) {
|
||||
tcache = NULL;
|
||||
tcache_tsd_set(&tcache);
|
||||
}
|
||||
} else /* disabled */ {
|
||||
if (tcache > TCACHE_STATE_MAX) {
|
||||
tcache_destroy(tcache);
|
||||
tcache = NULL;
|
||||
}
|
||||
if (tcache == NULL) {
|
||||
tcache = TCACHE_STATE_DISABLED;
|
||||
tcache_tsd_set(&tcache);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tcache_t *
|
||||
tcache_get(bool create)
|
||||
{
|
||||
tcache_t *tcache;
|
||||
|
||||
if (config_tcache == false)
|
||||
return (NULL);
|
||||
if (config_lazy_lock && isthreaded == false)
|
||||
return (NULL);
|
||||
|
||||
tcache = *tcache_tsd_get();
|
||||
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
|
||||
if (tcache == TCACHE_STATE_DISABLED)
|
||||
return (NULL);
|
||||
if (tcache == NULL) {
|
||||
if (create == false) {
|
||||
/*
|
||||
* Creating a tcache here would cause
|
||||
* allocation as a side effect of free().
|
||||
* Ordinarily that would be okay since
|
||||
* tcache_create() failure is a soft failure
|
||||
* that doesn't propagate. However, if TLS
|
||||
* data are freed via free() as in glibc,
|
||||
* subtle corruption could result from setting
|
||||
* a TLS variable after its backing memory is
|
||||
* freed.
|
||||
*/
|
||||
return (NULL);
|
||||
}
|
||||
if (tcache_enabled_get() == false) {
|
||||
tcache_enabled_set(false); /* Memoize. */
|
||||
return (NULL);
|
||||
}
|
||||
return (tcache_create(choose_arena(NULL)));
|
||||
}
|
||||
if (tcache == TCACHE_STATE_PURGATORY) {
|
||||
/*
|
||||
* Make a note that an allocator function was called
|
||||
* after tcache_thread_cleanup() was called.
|
||||
*/
|
||||
tcache = TCACHE_STATE_REINCARNATED;
|
||||
tcache_tsd_set(&tcache);
|
||||
return (NULL);
|
||||
}
|
||||
if (tcache == TCACHE_STATE_REINCARNATED)
|
||||
return (NULL);
|
||||
not_reached();
|
||||
}
|
||||
|
||||
return (tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_event(tcache_t *tcache)
|
||||
{
|
||||
|
||||
if (TCACHE_GC_INCR == 0)
|
||||
return;
|
||||
|
||||
tcache->ev_cnt++;
|
||||
assert(tcache->ev_cnt <= TCACHE_GC_INCR);
|
||||
if (tcache->ev_cnt == TCACHE_GC_INCR)
|
||||
tcache_event_hard(tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
tcache_alloc_easy(tcache_bin_t *tbin)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
if (tbin->ncached == 0) {
|
||||
tbin->low_water = -1;
|
||||
return (NULL);
|
||||
}
|
||||
tbin->ncached--;
|
||||
if ((int)tbin->ncached < tbin->low_water)
|
||||
tbin->low_water = tbin->ncached;
|
||||
ret = tbin->avail[tbin->ncached];
|
||||
return (ret);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
|
||||
{
|
||||
void *ret;
|
||||
size_t binind;
|
||||
tcache_bin_t *tbin;
|
||||
|
||||
binind = SMALL_SIZE2BIN(size);
|
||||
assert(binind < NBINS);
|
||||
tbin = &tcache->tbins[binind];
|
||||
size = arena_bin_info[binind].reg_size;
|
||||
ret = tcache_alloc_easy(tbin);
|
||||
if (ret == NULL) {
|
||||
ret = tcache_alloc_small_hard(tcache, tbin, binind);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
}
|
||||
assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
|
||||
|
||||
if (zero == false) {
|
||||
if (config_fill) {
|
||||
if (opt_junk) {
|
||||
arena_alloc_junk_small(ret,
|
||||
&arena_bin_info[binind], false);
|
||||
} else if (opt_zero)
|
||||
memset(ret, 0, size);
|
||||
}
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
} else {
|
||||
if (config_fill && opt_junk) {
|
||||
arena_alloc_junk_small(ret, &arena_bin_info[binind],
|
||||
true);
|
||||
}
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
memset(ret, 0, size);
|
||||
}
|
||||
|
||||
if (config_stats)
|
||||
tbin->tstats.nrequests++;
|
||||
if (config_prof)
|
||||
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
|
||||
tcache_event(tcache);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
|
||||
{
|
||||
void *ret;
|
||||
size_t binind;
|
||||
tcache_bin_t *tbin;
|
||||
|
||||
size = PAGE_CEILING(size);
|
||||
assert(size <= tcache_maxclass);
|
||||
binind = NBINS + (size >> LG_PAGE) - 1;
|
||||
assert(binind < nhbins);
|
||||
tbin = &tcache->tbins[binind];
|
||||
ret = tcache_alloc_easy(tbin);
|
||||
if (ret == NULL) {
|
||||
/*
|
||||
* Only allocate one large object at a time, because it's quite
|
||||
* expensive to create one and not use it.
|
||||
*/
|
||||
ret = arena_malloc_large(tcache->arena, size, zero);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
} else {
|
||||
if (config_prof && prof_promote && size == PAGE) {
|
||||
arena_chunk_t *chunk =
|
||||
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
|
||||
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
|
||||
LG_PAGE);
|
||||
arena_mapbits_large_binind_set(chunk, pageind,
|
||||
BININD_INVALID);
|
||||
}
|
||||
if (zero == false) {
|
||||
if (config_fill) {
|
||||
if (opt_junk)
|
||||
memset(ret, 0xa5, size);
|
||||
else if (opt_zero)
|
||||
memset(ret, 0, size);
|
||||
}
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
} else {
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
memset(ret, 0, size);
|
||||
}
|
||||
|
||||
if (config_stats)
|
||||
tbin->tstats.nrequests++;
|
||||
if (config_prof)
|
||||
tcache->prof_accumbytes += size;
|
||||
}
|
||||
|
||||
tcache_event(tcache);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
|
||||
{
|
||||
tcache_bin_t *tbin;
|
||||
tcache_bin_info_t *tbin_info;
|
||||
|
||||
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
|
||||
|
||||
if (config_fill && opt_junk)
|
||||
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
|
||||
|
||||
tbin = &tcache->tbins[binind];
|
||||
tbin_info = &tcache_bin_info[binind];
|
||||
if (tbin->ncached == tbin_info->ncached_max) {
|
||||
tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
|
||||
1), tcache);
|
||||
}
|
||||
assert(tbin->ncached < tbin_info->ncached_max);
|
||||
tbin->avail[tbin->ncached] = ptr;
|
||||
tbin->ncached++;
|
||||
|
||||
tcache_event(tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
|
||||
{
|
||||
size_t binind;
|
||||
tcache_bin_t *tbin;
|
||||
tcache_bin_info_t *tbin_info;
|
||||
|
||||
assert((size & PAGE_MASK) == 0);
|
||||
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
|
||||
assert(tcache_salloc(ptr) <= tcache_maxclass);
|
||||
|
||||
binind = NBINS + (size >> LG_PAGE) - 1;
|
||||
|
||||
if (config_fill && opt_junk)
|
||||
memset(ptr, 0x5a, size);
|
||||
|
||||
tbin = &tcache->tbins[binind];
|
||||
tbin_info = &tcache_bin_info[binind];
|
||||
if (tbin->ncached == tbin_info->ncached_max) {
|
||||
tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
|
||||
1), tcache);
|
||||
}
|
||||
assert(tbin->ncached < tbin_info->ncached_max);
|
||||
tbin->avail[tbin->ncached] = ptr;
|
||||
tbin->ncached++;
|
||||
|
||||
tcache_event(tcache);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* JEMALLOC_H_INLINES */
|
||||
/******************************************************************************/
|
||||
53
deps/jemalloc/include/jemalloc/internal/tcache_externs.h
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
|
||||
|
||||
extern bool opt_tcache;
|
||||
extern ssize_t opt_lg_tcache_max;
|
||||
|
||||
extern cache_bin_info_t *tcache_bin_info;
|
||||
|
||||
/*
|
||||
* Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
|
||||
* large-object bins.
|
||||
*/
|
||||
extern unsigned nhbins;
|
||||
|
||||
/* Maximum cached size class. */
|
||||
extern size_t tcache_maxclass;
|
||||
|
||||
/*
|
||||
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
|
||||
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
|
||||
* completely disjoint from this data structure. tcaches starts off as a sparse
|
||||
* array, so it has no physical memory footprint until individual pages are
|
||||
* touched. This allows the entire array to be allocated the first time an
|
||||
* explicit tcache is created without a disproportionate impact on memory usage.
|
||||
*/
|
||||
extern tcaches_t *tcaches;
|
||||
|
||||
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
|
||||
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
|
||||
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
||||
cache_bin_t *tbin, szind_t binind, bool *tcache_success);
|
||||
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
|
||||
szind_t binind, unsigned rem);
|
||||
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
|
||||
unsigned rem, tcache_t *tcache);
|
||||
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
|
||||
arena_t *arena);
|
||||
tcache_t *tcache_create_explicit(tsd_t *tsd);
|
||||
void tcache_cleanup(tsd_t *tsd);
|
||||
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
|
||||
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
|
||||
void tcaches_flush(tsd_t *tsd, unsigned ind);
|
||||
void tcaches_destroy(tsd_t *tsd, unsigned ind);
|
||||
bool tcache_boot(tsdn_t *tsdn);
|
||||
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
|
||||
void tcache_prefork(tsdn_t *tsdn);
|
||||
void tcache_postfork_parent(tsdn_t *tsdn);
|
||||
void tcache_postfork_child(tsdn_t *tsdn);
|
||||
void tcache_flush(tsd_t *tsd);
|
||||
bool tsd_tcache_data_init(tsd_t *tsd);
|
||||
bool tsd_tcache_enabled_data_init(tsd_t *tsd);
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
|
||||
227
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
|
||||
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
|
||||
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
static inline bool
|
||||
tcache_enabled_get(tsd_t *tsd) {
|
||||
return tsd_tcache_enabled_get(tsd);
|
||||
}
|
||||
|
||||
static inline void
|
||||
tcache_enabled_set(tsd_t *tsd, bool enabled) {
|
||||
bool was_enabled = tsd_tcache_enabled_get(tsd);
|
||||
|
||||
if (!was_enabled && enabled) {
|
||||
tsd_tcache_data_init(tsd);
|
||||
} else if (was_enabled && !enabled) {
|
||||
tcache_cleanup(tsd);
|
||||
}
|
||||
/* Commit the state last. Above calls check current state. */
|
||||
tsd_tcache_enabled_set(tsd, enabled);
|
||||
tsd_slow_update(tsd);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_event(tsd_t *tsd, tcache_t *tcache) {
|
||||
if (TCACHE_GC_INCR == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(ticker_tick(&tcache->gc_ticker))) {
|
||||
tcache_event_hard(tsd, tcache);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
|
||||
size_t size, szind_t binind, bool zero, bool slow_path) {
|
||||
void *ret;
|
||||
cache_bin_t *bin;
|
||||
bool tcache_success;
|
||||
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
|
||||
|
||||
assert(binind < SC_NBINS);
|
||||
bin = tcache_small_bin_get(tcache, binind);
|
||||
ret = cache_bin_alloc_easy(bin, &tcache_success);
|
||||
assert(tcache_success == (ret != NULL));
|
||||
if (unlikely(!tcache_success)) {
|
||||
bool tcache_hard_success;
|
||||
arena = arena_choose(tsd, arena);
|
||||
if (unlikely(arena == NULL)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
|
||||
bin, binind, &tcache_hard_success);
|
||||
if (tcache_hard_success == false) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
assert(ret);
|
||||
/*
|
||||
* Only compute usize if required. The checks in the following if
|
||||
* statement are all static.
|
||||
*/
|
||||
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
|
||||
usize = sz_index2size(binind);
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
|
||||
}
|
||||
|
||||
if (likely(!zero)) {
|
||||
if (slow_path && config_fill) {
|
||||
if (unlikely(opt_junk_alloc)) {
|
||||
arena_alloc_junk_small(ret, &bin_infos[binind],
|
||||
false);
|
||||
} else if (unlikely(opt_zero)) {
|
||||
memset(ret, 0, usize);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
|
||||
arena_alloc_junk_small(ret, &bin_infos[binind], true);
|
||||
}
|
||||
memset(ret, 0, usize);
|
||||
}
|
||||
|
||||
if (config_stats) {
|
||||
bin->tstats.nrequests++;
|
||||
}
|
||||
if (config_prof) {
|
||||
tcache->prof_accumbytes += usize;
|
||||
}
|
||||
tcache_event(tsd, tcache);
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
|
||||
szind_t binind, bool zero, bool slow_path) {
|
||||
void *ret;
|
||||
cache_bin_t *bin;
|
||||
bool tcache_success;
|
||||
|
||||
assert(binind >= SC_NBINS && binind < nhbins);
|
||||
bin = tcache_large_bin_get(tcache, binind);
|
||||
ret = cache_bin_alloc_easy(bin, &tcache_success);
|
||||
assert(tcache_success == (ret != NULL));
|
||||
if (unlikely(!tcache_success)) {
|
||||
/*
|
||||
* Only allocate one large object at a time, because it's quite
|
||||
* expensive to create one and not use it.
|
||||
*/
|
||||
arena = arena_choose(tsd, arena);
|
||||
if (unlikely(arena == NULL)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
|
||||
if (ret == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
|
||||
|
||||
/* Only compute usize on demand */
|
||||
if (config_prof || (slow_path && config_fill) ||
|
||||
unlikely(zero)) {
|
||||
usize = sz_index2size(binind);
|
||||
assert(usize <= tcache_maxclass);
|
||||
}
|
||||
|
||||
if (likely(!zero)) {
|
||||
if (slow_path && config_fill) {
|
||||
if (unlikely(opt_junk_alloc)) {
|
||||
memset(ret, JEMALLOC_ALLOC_JUNK,
|
||||
usize);
|
||||
} else if (unlikely(opt_zero)) {
|
||||
memset(ret, 0, usize);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
memset(ret, 0, usize);
|
||||
}
|
||||
|
||||
if (config_stats) {
|
||||
bin->tstats.nrequests++;
|
||||
}
|
||||
if (config_prof) {
|
||||
tcache->prof_accumbytes += usize;
|
||||
}
|
||||
}
|
||||
|
||||
tcache_event(tsd, tcache);
|
||||
return ret;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
|
||||
bool slow_path) {
|
||||
cache_bin_t *bin;
|
||||
cache_bin_info_t *bin_info;
|
||||
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
|
||||
<= SC_SMALL_MAXCLASS);
|
||||
|
||||
if (slow_path && config_fill && unlikely(opt_junk_free)) {
|
||||
arena_dalloc_junk_small(ptr, &bin_infos[binind]);
|
||||
}
|
||||
|
||||
bin = tcache_small_bin_get(tcache, binind);
|
||||
bin_info = &tcache_bin_info[binind];
|
||||
if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
|
||||
tcache_bin_flush_small(tsd, tcache, bin, binind,
|
||||
(bin_info->ncached_max >> 1));
|
||||
bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
|
||||
assert(ret);
|
||||
}
|
||||
|
||||
tcache_event(tsd, tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
|
||||
bool slow_path) {
|
||||
cache_bin_t *bin;
|
||||
cache_bin_info_t *bin_info;
|
||||
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
|
||||
> SC_SMALL_MAXCLASS);
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
|
||||
|
||||
if (slow_path && config_fill && unlikely(opt_junk_free)) {
|
||||
large_dalloc_junk(ptr, sz_index2size(binind));
|
||||
}
|
||||
|
||||
bin = tcache_large_bin_get(tcache, binind);
|
||||
bin_info = &tcache_bin_info[binind];
|
||||
if (unlikely(bin->ncached == bin_info->ncached_max)) {
|
||||
tcache_bin_flush_large(tsd, bin, binind,
|
||||
(bin_info->ncached_max >> 1), tcache);
|
||||
}
|
||||
assert(bin->ncached < bin_info->ncached_max);
|
||||
bin->ncached++;
|
||||
*(bin->avail - bin->ncached) = ptr;
|
||||
|
||||
tcache_event(tsd, tcache);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tcache_t *
|
||||
tcaches_get(tsd_t *tsd, unsigned ind) {
|
||||
tcaches_t *elm = &tcaches[ind];
|
||||
if (unlikely(elm->tcache == NULL)) {
|
||||
malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
|
||||
abort();
|
||||
} else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
|
||||
elm->tcache = tcache_create_explicit(tsd);
|
||||
}
|
||||
return elm->tcache;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
|
||||
70
deps/jemalloc/include/jemalloc/internal/tcache_structs.h
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/cache_bin.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
#include "jemalloc/internal/tsd_types.h"
|
||||
|
||||
/* Various uses of this struct need it to be a named type. */
|
||||
typedef ql_elm(tsd_t) tsd_link_t;
|
||||
|
||||
struct tcache_s {
|
||||
/*
|
||||
* To minimize our cache-footprint, we put the frequently accessed data
|
||||
* together at the start of this struct.
|
||||
*/
|
||||
|
||||
/* Cleared after arena_prof_accum(). */
|
||||
uint64_t prof_accumbytes;
|
||||
/* Drives incremental GC. */
|
||||
ticker_t gc_ticker;
|
||||
/*
|
||||
* The pointer stacks associated with bins follow as a contiguous array.
|
||||
* During tcache initialization, the avail pointer in each element of
|
||||
* tbins is initialized to point to the proper offset within this array.
|
||||
*/
|
||||
cache_bin_t bins_small[SC_NBINS];
|
||||
|
||||
/*
|
||||
* This data is less hot; we can be a little less careful with our
|
||||
* footprint here.
|
||||
*/
|
||||
/* Lets us track all the tcaches in an arena. */
|
||||
ql_elm(tcache_t) link;
|
||||
|
||||
/* Logically scoped to tsd, but put here for cache layout reasons. */
|
||||
ql_elm(tsd_t) tsd_link;
|
||||
bool in_hook;
|
||||
|
||||
/*
|
||||
* The descriptor lets the arena find our cache bins without seeing the
|
||||
* tcache definition. This enables arenas to aggregate stats across
|
||||
* tcaches without having a tcache dependency.
|
||||
*/
|
||||
cache_bin_array_descriptor_t cache_bin_array_descriptor;
|
||||
|
||||
/* The arena this tcache is associated with. */
|
||||
arena_t *arena;
|
||||
/* Next bin to GC. */
|
||||
szind_t next_gc_bin;
|
||||
/* For small bins, fill (ncached_max >> lg_fill_div). */
|
||||
uint8_t lg_fill_div[SC_NBINS];
|
||||
/*
|
||||
* We put the cache bins for large size classes at the end of the
|
||||
* struct, since some of them might not get used. This might end up
|
||||
* letting us avoid touching an extra page if we don't have to.
|
||||
*/
|
||||
cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
|
||||
};
|
||||
|
||||
/* Linkage for list of available (previously used) explicit tcache IDs. */
|
||||
struct tcaches_s {
|
||||
union {
|
||||
tcache_t *tcache;
|
||||
tcaches_t *next;
|
||||
};
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
|
||||
59
deps/jemalloc/include/jemalloc/internal/tcache_types.h
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
|
||||
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
typedef struct tcache_s tcache_t;
|
||||
typedef struct tcaches_s tcaches_t;
|
||||
|
||||
/*
|
||||
* tcache pointers close to NULL are used to encode state information that is
|
||||
* used for two purposes: preventing thread caching on a per thread basis and
|
||||
* cleaning up during thread shutdown.
|
||||
*/
|
||||
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
|
||||
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
|
||||
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
|
||||
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
|
||||
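
A small self-contained sketch of the encoding described above: values at or below TCACHE_STATE_MAX are sentinels, anything larger is a real tcache_t pointer. The same `<= TCACHE_STATE_MAX` comparison appears in the inline tcache_get()/tcache_flush() code earlier in this commit; the describe() helper and the fake address below are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef struct tcache_s tcache_t;	/* opaque; never dereferenced here */

#define TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY

static const char *
describe(tcache_t *tcache) {
	if (tcache == NULL) {
		return "no tcache yet";
	}
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		return "sentinel state, not a usable cache";
	}
	return "real tcache pointer";
}

int
main(void) {
	printf("NULL: %s\n", describe(NULL));
	printf("DISABLED: %s\n", describe(TCACHE_STATE_DISABLED));
	printf("0x1000: %s\n", describe((tcache_t *)(uintptr_t)0x1000));	/* fake address */
	return 0;
}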
|
||||
/*
|
||||
* Absolute minimum number of cache slots for each small bin.
|
||||
*/
|
||||
#define TCACHE_NSLOTS_SMALL_MIN 20
|
||||
|
||||
/*
|
||||
* Absolute maximum number of cache slots for each small bin in the thread
|
||||
* cache. This is an additional constraint beyond that imposed as: twice the
|
||||
* number of regions per slab for this size class.
|
||||
*
|
||||
* This constant must be an even number.
|
||||
*/
|
||||
#define TCACHE_NSLOTS_SMALL_MAX 200
|
||||
|
||||
/* Number of cache slots for large size classes. */
|
||||
#define TCACHE_NSLOTS_LARGE 20
|
||||
|
||||
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
|
||||
#define LG_TCACHE_MAXCLASS_DEFAULT 15
|
||||
|
||||
/*
|
||||
* TCACHE_GC_SWEEP is the approximate number of allocation events between
|
||||
* full GC sweeps. Integer rounding may cause the actual number to be
|
||||
* slightly higher, since GC is performed incrementally.
|
||||
*/
|
||||
#define TCACHE_GC_SWEEP 8192
|
||||
|
||||
/* Number of tcache allocation/deallocation events between incremental GCs. */
|
||||
#define TCACHE_GC_INCR \
|
||||
((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
|
||||
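
A worked instance of the arithmetic above, using a hypothetical SC_NBINS of 36 (the real value depends on the size-class configuration): 8192 / 36 == 227, so TCACHE_GC_INCR becomes 228 and a full sweep over all bins takes roughly 36 * 228 = 8208 events, slightly more than TCACHE_GC_SWEEP, as the comment notes.

#include <stdio.h>

#define TCACHE_GC_SWEEP 8192
#define SC_NBINS 36	/* hypothetical bin count, for illustration only */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))

int
main(void) {
	printf("incremental GC every %d events, full sweep ~%d events\n",
	    TCACHE_GC_INCR, TCACHE_GC_INCR * SC_NBINS);	/* 228, 8208 */
	return 0;
}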
|
||||
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
|
||||
#define TCACHE_ZERO_INITIALIZER {0}
|
||||
|
||||
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
|
||||
#define TCACHE_ENABLED_ZERO_INITIALIZER false
|
||||
|
||||
/* Used for explicit tcache only. Means flushed but not destroyed. */
|
||||
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
|
||||
19
deps/jemalloc/include/jemalloc/internal/test_hooks.h
vendored
Normal file
@@ -0,0 +1,19 @@
#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
#define JEMALLOC_INTERNAL_TEST_HOOKS_H

extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();

#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)

#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
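
The JEMALLOC_HOOK macro above relies on two C features: the short-circuit && calls the hook only when one is installed, and the comma operator makes the whole expression evaluate to the wrapped symbol, so the call site still invokes the real function. A standalone sketch of the same trick, using an illustrative demo_puts wrapper rather than jemalloc's actual defines:

#include <stdio.h>

static void (*demo_libc_hook)(void);	/* NULL until a test installs it */

#define DEMO_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#define demo_puts DEMO_HOOK(puts, demo_libc_hook)

static void
note_call(void) {
	fputs("hook fired\n", stderr);
}

int
main(void) {
	demo_puts("no hook installed");
	demo_libc_hook = note_call;
	demo_puts("hook installed");	/* "hook fired" is written to stderr first */
	return 0;
}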
91
deps/jemalloc/include/jemalloc/internal/ticker.h
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TICKER_H
|
||||
#define JEMALLOC_INTERNAL_TICKER_H
|
||||
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
/**
|
||||
* A ticker makes it easy to count-down events until some limit. You
|
||||
* ticker_init the ticker to trigger every nticks events. You then notify it
|
||||
* that an event has occurred with calls to ticker_tick (or that nticks events
|
||||
* have occurred with a call to ticker_ticks), which will return true (and reset
|
||||
* the counter) if the countdown hit zero.
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
int32_t tick;
|
||||
int32_t nticks;
|
||||
} ticker_t;
|
||||
|
||||
static inline void
|
||||
ticker_init(ticker_t *ticker, int32_t nticks) {
|
||||
ticker->tick = nticks;
|
||||
ticker->nticks = nticks;
|
||||
}
|
||||
|
||||
static inline void
|
||||
ticker_copy(ticker_t *ticker, const ticker_t *other) {
|
||||
*ticker = *other;
|
||||
}
|
||||
|
||||
static inline int32_t
|
||||
ticker_read(const ticker_t *ticker) {
|
||||
return ticker->tick;
|
||||
}
|
||||
|
||||
/*
|
||||
* Not intended to be a public API. Unfortunately, on x86, neither gcc nor
|
||||
* clang seems smart enough to turn
|
||||
* ticker->tick -= nticks;
|
||||
* if (unlikely(ticker->tick < 0)) {
|
||||
* fixup ticker
|
||||
* return true;
|
||||
* }
|
||||
* return false;
|
||||
* into
|
||||
* subq %nticks_reg, (%ticker_reg)
|
||||
* js fixup ticker
|
||||
*
|
||||
* unless we force "fixup ticker" out of line. In that case, gcc gets it right,
|
||||
* but clang now does worse than before. So, on x86 with gcc, we force it out
|
||||
* of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
|
||||
* worth the hassle, but this is on the fast path of both malloc and free (via
|
||||
* tcache_event).
|
||||
*/
|
||||
#if defined(__GNUC__) && !defined(__clang__) \
|
||||
&& (defined(__x86_64__) || defined(__i386__))
|
||||
JEMALLOC_NOINLINE
|
||||
#endif
|
||||
static bool
|
||||
ticker_fixup(ticker_t *ticker) {
|
||||
ticker->tick = ticker->nticks;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ticker_ticks(ticker_t *ticker, int32_t nticks) {
|
||||
ticker->tick -= nticks;
|
||||
if (unlikely(ticker->tick < 0)) {
|
||||
return ticker_fixup(ticker);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
ticker_tick(ticker_t *ticker) {
|
||||
return ticker_ticks(ticker, 1);
|
||||
}
|
||||
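
A minimal usage sketch of the countdown behaviour described at the top of this header. The struct and the tick logic below deliberately mirror ticker_t and ticker_tick()/ticker_fixup() as defined above, so the reset-and-report path is visible in isolation; in real jemalloc code you would simply include this header.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int32_t tick;
	int32_t nticks;
} demo_ticker_t;

static void
demo_ticker_init(demo_ticker_t *ticker, int32_t nticks) {
	ticker->tick = nticks;
	ticker->nticks = nticks;
}

static bool
demo_ticker_tick(demo_ticker_t *ticker) {
	if (--ticker->tick < 0) {
		ticker->tick = ticker->nticks;	/* fixup: reset and report the firing */
		return true;
	}
	return false;
}

int
main(void) {
	demo_ticker_t t;
	demo_ticker_init(&t, 3);
	for (int event = 1; event <= 8; event++) {
		if (demo_ticker_tick(&t)) {
			printf("ticker fired on event %d\n", event);	/* events 4 and 8 */
		}
	}
	return 0;
}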
|
||||
/*
|
||||
* Try to tick. If ticker would fire, return true, but rely on
|
||||
* slowpath to reset ticker.
|
||||
*/
|
||||
static inline bool
|
||||
ticker_trytick(ticker_t *ticker) {
|
||||
--ticker->tick;
|
||||
if (unlikely(ticker->tick < 0)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TICKER_H */
|
||||
415
deps/jemalloc/include/jemalloc/internal/tsd.h
vendored
Normal file
@@ -0,0 +1,415 @@
|
||||
#ifndef JEMALLOC_INTERNAL_TSD_H
|
||||
#define JEMALLOC_INTERNAL_TSD_H
|
||||
|
||||
#include "jemalloc/internal/arena_types.h"
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/bin_types.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_externs.h"
|
||||
#include "jemalloc/internal/prof_types.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/rtree_tsd.h"
|
||||
#include "jemalloc/internal/tcache_types.h"
|
||||
#include "jemalloc/internal/tcache_structs.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
#include "jemalloc/internal/witness.h"
|
||||
|
||||
/*
|
||||
* Thread-Specific-Data layout
|
||||
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
|
||||
* s: state
|
||||
* e: tcache_enabled
|
||||
* m: thread_allocated (config_stats)
|
||||
* f: thread_deallocated (config_stats)
|
||||
* p: prof_tdata (config_prof)
|
||||
* c: rtree_ctx (rtree cache accessed on deallocation)
|
||||
* t: tcache
|
||||
* --- data not accessed on tcache fast path: arena-related fields ---
|
||||
* d: arenas_tdata_bypass
|
||||
* r: reentrancy_level
|
||||
* x: narenas_tdata
|
||||
* i: iarena
|
||||
* a: arena
|
||||
* o: arenas_tdata
|
||||
* Loading TSD data is on the critical path of basically all malloc operations.
|
||||
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
|
||||
* Use a compact layout to reduce cache footprint.
|
||||
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
|
||||
* |---------------------------- 1st cacheline ----------------------------|
|
||||
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
|
||||
* |---------------------------- 2nd cacheline ----------------------------|
|
||||
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
|
||||
* |---------------------------- 3rd cacheline ----------------------------|
|
||||
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
|
||||
* +-------------------------------------------------------------------------+
|
||||
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
|
||||
*
|
||||
* The last 3 members (i, a and o) before tcache isn't really needed on tcache
|
||||
* fast path. However we have a number of unused tcache bins and witnesses
|
||||
* (never touched unless config_debug) at the end of tcache, so we place them
|
||||
* there to avoid breaking the cachelines and possibly paging in an extra page.
|
||||
*/
|
||||
#ifdef JEMALLOC_JET
|
||||
typedef void (*test_callback_t)(int *);
|
||||
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
|
||||
# define MALLOC_TEST_TSD \
|
||||
O(test_data, int, int) \
|
||||
O(test_callback, test_callback_t, int)
|
||||
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
|
||||
#else
|
||||
# define MALLOC_TEST_TSD
|
||||
# define MALLOC_TEST_TSD_INITIALIZER
|
||||
#endif
|
||||
|
||||
/* O(name, type, nullable type) */
|
||||
#define MALLOC_TSD \
|
||||
O(tcache_enabled, bool, bool) \
|
||||
O(arenas_tdata_bypass, bool, bool) \
|
||||
O(reentrancy_level, int8_t, int8_t) \
|
||||
O(narenas_tdata, uint32_t, uint32_t) \
|
||||
O(offset_state, uint64_t, uint64_t) \
|
||||
O(thread_allocated, uint64_t, uint64_t) \
|
||||
O(thread_deallocated, uint64_t, uint64_t) \
|
||||
O(bytes_until_sample, int64_t, int64_t) \
|
||||
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
|
||||
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
|
||||
O(iarena, arena_t *, arena_t *) \
|
||||
O(arena, arena_t *, arena_t *) \
|
||||
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
|
||||
O(binshards, tsd_binshards_t, tsd_binshards_t)\
|
||||
O(tcache, tcache_t, tcache_t) \
|
||||
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
|
||||
MALLOC_TEST_TSD
|
||||
|
||||
#define TSD_INITIALIZER { \
|
||||
ATOMIC_INIT(tsd_state_uninitialized), \
|
||||
TCACHE_ENABLED_ZERO_INITIALIZER, \
|
||||
false, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
0, \
|
||||
NULL, \
|
||||
RTREE_CTX_ZERO_INITIALIZER, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
NULL, \
|
||||
TSD_BINSHARDS_ZERO_INITIALIZER, \
|
||||
TCACHE_ZERO_INITIALIZER, \
|
||||
WITNESS_TSD_INITIALIZER \
|
||||
MALLOC_TEST_TSD_INITIALIZER \
|
||||
}
|
||||
|
||||
void *malloc_tsd_malloc(size_t size);
|
||||
void malloc_tsd_dalloc(void *wrapper);
|
||||
void malloc_tsd_cleanup_register(bool (*f)(void));
|
||||
tsd_t *malloc_tsd_boot0(void);
|
||||
void malloc_tsd_boot1(void);
|
||||
void tsd_cleanup(void *arg);
|
||||
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
|
||||
void tsd_state_set(tsd_t *tsd, uint8_t new_state);
|
||||
void tsd_slow_update(tsd_t *tsd);
|
||||
void tsd_prefork(tsd_t *tsd);
|
||||
void tsd_postfork_parent(tsd_t *tsd);
|
||||
void tsd_postfork_child(tsd_t *tsd);
|
||||
|
||||
/*
|
||||
* Call ..._inc when your module wants to take all threads down the slow paths,
|
||||
* and ..._dec when it no longer needs to.
|
||||
*/
|
||||
void tsd_global_slow_inc(tsdn_t *tsdn);
|
||||
void tsd_global_slow_dec(tsdn_t *tsdn);
|
||||
bool tsd_global_slow();
|
||||
|
||||
enum {
|
||||
/* Common case --> jnz. */
|
||||
tsd_state_nominal = 0,
|
||||
/* Initialized but on slow path. */
|
||||
tsd_state_nominal_slow = 1,
|
||||
/*
|
||||
* Some thread has changed global state in such a way that all nominal
|
||||
* threads need to recompute their fast / slow status the next time they
|
||||
* get a chance.
|
||||
*
|
||||
* Any thread can change another thread's status *to* recompute, but
|
||||
* threads are the only ones who can change their status *from*
|
||||
* recompute.
|
||||
*/
|
||||
tsd_state_nominal_recompute = 2,
|
||||
/*
|
||||
* The above nominal states should be lower values. We use
|
||||
* tsd_nominal_max to separate nominal states from threads in the
|
||||
* process of being born / dying.
|
||||
*/
|
||||
tsd_state_nominal_max = 2,
|
||||
|
||||
/*
|
||||
* A thread might free() during its death as its only allocator action;
|
||||
* in such scenarios, we need tsd, but set up in such a way that no
|
||||
* cleanup is necessary.
|
||||
*/
|
||||
tsd_state_minimal_initialized = 3,
|
||||
/* States during which we know we're in thread death. */
|
||||
tsd_state_purgatory = 4,
|
||||
tsd_state_reincarnated = 5,
|
||||
/*
|
||||
* What it says on the tin; tsd that hasn't been initialized. Note
|
||||
* that even when the tsd struct lives in TLS, we need to keep track
|
||||
* of stuff like whether or not our pthread destructors have been
|
||||
* scheduled, so this really truly is different than the nominal state.
|
||||
*/
|
||||
tsd_state_uninitialized = 6
|
||||
};
|
||||
|
||||
/*
|
||||
* Some TSD accesses can only be done in a nominal state. To enforce this, we
|
||||
* wrap TSD member access in a function that asserts on TSD state, and mangle
|
||||
* field names to prevent touching them accidentally.
|
||||
*/
|
||||
#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
|
||||
|
||||
#ifdef JEMALLOC_U8_ATOMICS
|
||||
# define tsd_state_t atomic_u8_t
|
||||
# define tsd_atomic_load atomic_load_u8
|
||||
# define tsd_atomic_store atomic_store_u8
|
||||
# define tsd_atomic_exchange atomic_exchange_u8
|
||||
#else
|
||||
# define tsd_state_t atomic_u32_t
|
||||
# define tsd_atomic_load atomic_load_u32
|
||||
# define tsd_atomic_store atomic_store_u32
|
||||
# define tsd_atomic_exchange atomic_exchange_u32
|
||||
#endif
|
||||
|
||||
/* The actual tsd. */
|
||||
struct tsd_s {
|
||||
/*
|
||||
* The contents should be treated as totally opaque outside the tsd
|
||||
* module. Access any thread-local state through the getters and
|
||||
* setters below.
|
||||
*/
|
||||
|
||||
/*
|
||||
* We manually limit the state to just a single byte, unless the 8-bit
|
||||
* atomics are unavailable (which is rare).
|
||||
*/
|
||||
tsd_state_t state;
|
||||
#define O(n, t, nt) \
|
||||
t TSD_MANGLE(n);
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
};
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE uint8_t
|
||||
tsd_state_get(tsd_t *tsd) {
|
||||
/*
|
||||
* This should be atomic. Unfortunately, compilers right now can't tell
|
||||
* that this can be done as a memory comparison, and forces a load into
|
||||
* a register that hurts fast-path performance.
|
||||
*/
|
||||
/* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
|
||||
return *(uint8_t *)&tsd->state;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
|
||||
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
|
||||
* explicitly converted to tsd_t, which is non-nullable.
|
||||
*/
|
||||
struct tsdn_s {
|
||||
tsd_t tsd;
|
||||
};
|
||||
#define TSDN_NULL ((tsdn_t *)0)
|
||||
JEMALLOC_ALWAYS_INLINE tsdn_t *
|
||||
tsd_tsdn(tsd_t *tsd) {
|
||||
return (tsdn_t *)tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsdn_null(const tsdn_t *tsdn) {
|
||||
return tsdn == NULL;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsdn_tsd(tsdn_t *tsdn) {
|
||||
assert(!tsdn_null(tsdn));
|
||||
|
||||
return &tsdn->tsd;
|
||||
}
|
||||
|
||||
/*
|
||||
* We put the platform-specific data declarations and inlines into their own
|
||||
* header files to avoid cluttering this file. They define tsd_boot0,
|
||||
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
|
||||
*/
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
#include "jemalloc/internal/tsd_tls.h"
|
||||
#elif (defined(_WIN32))
|
||||
#include "jemalloc/internal/tsd_win.h"
|
||||
#else
|
||||
#include "jemalloc/internal/tsd_generic.h"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
|
||||
* foo. This omits some safety checks, and so can be used during tsd
|
||||
* initialization and cleanup.
|
||||
*/
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
|
||||
return &tsd->TSD_MANGLE(n); \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
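
The O(...)/MALLOC_TSD machinery above is a classic X-macro: one field list expanded several times to generate both the struct members and the per-field accessors. A self-contained sketch of the same pattern, with made-up field names rather than jemalloc's actual TSD layout:

#include <stdint.h>
#include <stdio.h>

/* One list of (name, type) pairs, expanded below under different O() defines. */
#define DEMO_TSD \
    O(thread_allocated,   uint64_t) \
    O(thread_deallocated, uint64_t)

typedef struct {
#define O(n, t) t n;
	DEMO_TSD
#undef O
} demo_tsd_t;

/* demo_foo_get(tsd) returns the field foo, mirroring the tsd_foo_get getters. */
#define O(n, t) \
static t demo_##n##_get(demo_tsd_t *tsd) { return tsd->n; }
DEMO_TSD
#undef O

int
main(void) {
	demo_tsd_t tsd = {1024, 256};
	printf("allocated=%llu deallocated=%llu\n",
	    (unsigned long long)demo_thread_allocated_get(&tsd),
	    (unsigned long long)demo_thread_deallocated_get(&tsd));
	return 0;
}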
|
||||
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t * \
|
||||
tsd_##n##p_get(tsd_t *tsd) { \
|
||||
/* \
|
||||
* Because the state might change asynchronously if it's \
|
||||
* nominal, we need to make sure that we only read it once. \
|
||||
*/ \
|
||||
uint8_t state = tsd_state_get(tsd); \
|
||||
assert(state == tsd_state_nominal || \
|
||||
state == tsd_state_nominal_slow || \
|
||||
state == tsd_state_nominal_recompute || \
|
||||
state == tsd_state_reincarnated || \
|
||||
state == tsd_state_minimal_initialized); \
|
||||
return tsd_##n##p_get_unsafe(tsd); \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/*
|
||||
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
|
||||
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
|
||||
*/
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE nt * \
|
||||
tsdn_##n##p_get(tsdn_t *tsdn) { \
|
||||
if (tsdn_null(tsdn)) { \
|
||||
return NULL; \
|
||||
} \
|
||||
tsd_t *tsd = tsdn_tsd(tsdn); \
|
||||
return (nt *)tsd_##n##p_get(tsd); \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE t \
|
||||
tsd_##n##_get(tsd_t *tsd) { \
|
||||
return *tsd_##n##p_get(tsd); \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
|
||||
#define O(n, t, nt) \
|
||||
JEMALLOC_ALWAYS_INLINE void \
|
||||
tsd_##n##_set(tsd_t *tsd, t val) { \
|
||||
assert(tsd_state_get(tsd) != tsd_state_reincarnated && \
|
||||
tsd_state_get(tsd) != tsd_state_minimal_initialized); \
|
||||
*tsd_##n##p_get(tsd) = val; \
|
||||
}
|
||||
MALLOC_TSD
|
||||
#undef O
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
tsd_assert_fast(tsd_t *tsd) {
|
||||
/*
|
||||
* Note that our fastness assertion does *not* include global slowness
|
||||
* counters; it's not in general possible to ensure that they won't
|
||||
* change asynchronously from underneath us.
|
||||
*/
|
||||
assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
|
||||
tsd_reentrancy_level_get(tsd) == 0);
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE bool
|
||||
tsd_fast(tsd_t *tsd) {
|
||||
bool fast = (tsd_state_get(tsd) == tsd_state_nominal);
|
||||
if (fast) {
|
||||
tsd_assert_fast(tsd);
|
||||
}
|
||||
|
||||
return fast;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_impl(bool init, bool minimal) {
|
||||
tsd_t *tsd = tsd_get(init);
|
||||
|
||||
if (!init && tsd_get_allocates() && tsd == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(tsd != NULL);
|
||||
|
||||
if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) {
|
||||
return tsd_fetch_slow(tsd, minimal);
|
||||
}
|
||||
assert(tsd_fast(tsd));
|
||||
tsd_assert_fast(tsd);
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch_min(void) {
|
||||
return tsd_fetch_impl(true, true);
|
||||
}
|
||||
|
||||
/* For internal background threads use only. */
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_internal_fetch(void) {
|
||||
tsd_t *tsd = tsd_fetch_min();
|
||||
/* Use reincarnated state to prevent full initialization. */
|
||||
tsd_state_set(tsd, tsd_state_reincarnated);
|
||||
|
||||
return tsd;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE tsd_t *
|
||||
tsd_fetch(void) {
|
||||
return tsd_fetch_impl(true, false);
|
||||
}
|
||||
|
||||
static inline bool
tsd_nominal(tsd_t *tsd) {
	return (tsd_state_get(tsd) <= tsd_state_nominal_max);
}

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
	if (!tsd_booted_get()) {
		return NULL;
	}

	return tsd_tsdn(tsd_fetch_impl(false, false));
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
	return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
	/*
	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
	 * return a pointer to it.
	 */
	if (unlikely(tsdn_null(tsdn))) {
		rtree_ctx_data_init(fallback);
		return fallback;
	}
	return tsd_rtree_ctx(tsdn_tsd(tsdn));
}

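/*
 * Intended usage (editor's sketch, not from the original source): callers
 * that may run without TSD keep a stack-allocated fallback and let
 * tsdn_rtree_ctx() pick the right context; when tsdn is NULL the fallback is
 * freshly initialized, which is slower but always safe:
 *
 *	rtree_ctx_t rtree_ctx_fallback;
 *	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 */
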
#endif /* JEMALLOC_INTERNAL_TSD_H */
163
deps/jemalloc/include/jemalloc/internal/tsd_generic.h
vendored
Normal file
163
deps/jemalloc/include/jemalloc/internal/tsd_generic.h
vendored
Normal file
@@ -0,0 +1,163 @@
#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_GENERIC_H

typedef struct tsd_init_block_s tsd_init_block_t;
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t) link;
	pthread_t thread;
	void *data;
};

/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;

typedef struct {
	bool initialized;
	tsd_t val;
} tsd_wrapper_t;

void *tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);

extern pthread_key_t tsd_tsd;
extern tsd_init_head_t tsd_init_head;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE void
tsd_cleanup_wrapper(void *arg) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;

	if (wrapper->initialized) {
		wrapper->initialized = false;
		tsd_cleanup(&wrapper->val);
		if (wrapper->initialized) {
			/* Trigger another cleanup round. */
			if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
			    {
				malloc_write("<jemalloc>: Error setting TSD\n");
				if (opt_abort) {
					abort();
				}
			}
			return;
		}
	}
	malloc_tsd_dalloc(wrapper);
}

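/*
 * Editor's note (not in the original source): POSIX runs a key's destructor
 * at thread exit with the key's value already reset to NULL, and repeats the
 * destructor pass (up to PTHREAD_DESTRUCTOR_ITERATIONS times) if any
 * destructor installs a new non-NULL value.  That is why, when tsd_cleanup()
 * re-initializes the TSD during cleanup, the wrapper is stored back with
 * pthread_setspecific() to request another round instead of being freed
 * immediately.
 */
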
JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
	if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
		malloc_write("<jemalloc>: Error setting TSD\n");
		abort();
	}
}

JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);

	if (init && unlikely(wrapper == NULL)) {
		tsd_init_block_t block;
		wrapper = (tsd_wrapper_t *)
		    tsd_init_check_recursion(&tsd_init_head, &block);
		if (wrapper) {
			return wrapper;
		}
		wrapper = (tsd_wrapper_t *)
		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
		block.data = (void *)wrapper;
		if (wrapper == NULL) {
			malloc_write("<jemalloc>: Error allocating TSD\n");
			abort();
		} else {
			wrapper->initialized = false;
			JEMALLOC_DIAGNOSTIC_PUSH
			JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
			tsd_t initializer = TSD_INITIALIZER;
			JEMALLOC_DIAGNOSTIC_POP
			wrapper->val = initializer;
		}
		tsd_wrapper_set(wrapper);
		tsd_init_finish(&tsd_init_head, &block);
	}
	return wrapper;
}

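/*
 * Editor's note (hedged interpretation): the tsd_init_check_recursion() /
 * tsd_init_finish() bracket appears to guard against the bootstrap path
 * re-entering itself on the same thread (e.g. if the allocation or TLS
 * machinery underneath calls back into malloc).  The outer call records its
 * in-flight wrapper in block.data so that a recursive inner call is handed
 * the same wrapper instead of starting a second initialization.
 */
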
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
		return true;
	}
	tsd_wrapper_set(&tsd_boot_wrapper);
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	tsd_wrapper_t *wrapper;
	wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
	if (wrapper == NULL) {
		malloc_write("<jemalloc>: Error allocating TSD\n");
		abort();
	}
	tsd_boot_wrapper.initialized = false;
	tsd_cleanup(&tsd_boot_wrapper.val);
	wrapper->initialized = false;
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	tsd_t initializer = TSD_INITIALIZER;
	JEMALLOC_DIAGNOSTIC_POP
	wrapper->val = initializer;
	tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	if (tsd_boot0()) {
		return true;
	}
	tsd_boot1();
	return false;
}

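/*
 * Editor's sketch (hedged): splitting bootstrap into tsd_boot0()/tsd_boot1()
 * lets the allocator create the pthread key and point it at the static
 * tsd_boot_wrapper before its own heap is usable, then migrate to a
 * heap-allocated wrapper once malloc_tsd_malloc() works.  A caller would
 * look roughly like the following, where bootstrap_allocator_internals() is
 * a hypothetical placeholder:
 *
 *	if (tsd_boot0()) {
 *		return true;
 *	}
 *	bootstrap_allocator_internals();
 *	tsd_boot1();
 */
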
JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return true;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(init);
	if (tsd_get_allocates() && !init && wrapper == NULL) {
		return NULL;
	}
	return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(true);
	if (likely(&wrapper->val != val)) {
		wrapper->val = *(val);
	}
	wrapper->initialized = true;
}
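/*
 * Editor's note (not in the original source): tsd_set() copies *val into the
 * thread's wrapper by value, unless val already points at the wrapper's own
 * tsd_t, and marks the wrapper initialized; only an initialized wrapper has
 * tsd_cleanup() run on it by tsd_cleanup_wrapper() at thread exit.
 */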
Some files were not shown because too many files have changed in this diff.