new version commit
2298
deps/jemalloc/src/arena.c
vendored
Normal file
File diff suppressed because it is too large
2
deps/jemalloc/src/atomic.c
vendored
Normal file
@@ -0,0 +1,2 @@
#define JEMALLOC_ATOMIC_C_
#include "jemalloc/internal/jemalloc_internal.h"
939
deps/jemalloc/src/background_thread.c
vendored
Normal file
@@ -0,0 +1,939 @@
|
||||
#define JEMALLOC_BACKGROUND_THREAD_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
/* This option should be opt-in only. */
|
||||
#define BACKGROUND_THREAD_DEFAULT false
|
||||
/* Read-only after initialization. */
|
||||
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
|
||||
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
|
||||
|
||||
/* Used for thread creation, termination and stats. */
|
||||
malloc_mutex_t background_thread_lock;
|
||||
/* Indicates global state. Atomic because decay reads this w/o locking. */
|
||||
atomic_b_t background_thread_enabled_state;
|
||||
size_t n_background_threads;
|
||||
size_t max_background_threads;
|
||||
/* Thread info per-index. */
|
||||
background_thread_info_t *background_thread_info;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
|
||||
|
||||
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
|
||||
void *(*)(void *), void *__restrict);
|
||||
|
||||
static void
|
||||
pthread_create_wrapper_init(void) {
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
if (!isthreaded) {
|
||||
isthreaded = true;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
int
|
||||
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
|
||||
void *(*start_routine)(void *), void *__restrict arg) {
|
||||
pthread_create_wrapper_init();
|
||||
|
||||
return pthread_create_fptr(thread, attr, start_routine, arg);
|
||||
}
|
||||
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
|
||||
|
||||
#ifndef JEMALLOC_BACKGROUND_THREAD
|
||||
#define NOT_REACHED { not_reached(); }
|
||||
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
|
||||
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
|
||||
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
|
||||
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
|
||||
arena_decay_t *decay, size_t npages_new) NOT_REACHED
|
||||
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
|
||||
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
|
||||
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
|
||||
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
|
||||
bool background_thread_stats_read(tsdn_t *tsdn,
|
||||
background_thread_stats_t *stats) NOT_REACHED
|
||||
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
|
||||
#undef NOT_REACHED
|
||||
#else
|
||||
|
||||
static bool background_thread_enabled_at_fork;
|
||||
|
||||
static void
|
||||
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
|
||||
background_thread_wakeup_time_set(tsdn, info, 0);
|
||||
info->npages_to_purge_new = 0;
|
||||
if (config_stats) {
|
||||
info->tot_n_runs = 0;
|
||||
nstime_init(&info->tot_sleep_time, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
set_current_thread_affinity(int cpu) {
|
||||
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
|
||||
cpu_set_t cpuset;
|
||||
CPU_ZERO(&cpuset);
|
||||
CPU_SET(cpu, &cpuset);
|
||||
int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
|
||||
|
||||
return (ret != 0);
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Threshold for determining when to wake up the background thread. */
|
||||
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
|
||||
#define BILLION UINT64_C(1000000000)
|
||||
/* Minimal sleep interval 100 ms. */
|
||||
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
|
||||
|
||||
static inline size_t
|
||||
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
|
||||
size_t i;
|
||||
uint64_t sum = 0;
|
||||
for (i = 0; i < interval; i++) {
|
||||
sum += decay->backlog[i] * h_steps[i];
|
||||
}
|
||||
for (; i < SMOOTHSTEP_NSTEPS; i++) {
|
||||
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
|
||||
}
|
||||
|
||||
return (size_t)(sum >> SMOOTHSTEP_BFP);
|
||||
}
|
||||
|
||||
static uint64_t
|
||||
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
|
||||
extents_t *extents) {
|
||||
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
|
||||
/* Use minimal interval if decay is contended. */
|
||||
return BACKGROUND_THREAD_MIN_INTERVAL_NS;
|
||||
}
|
||||
|
||||
uint64_t interval;
|
||||
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
|
||||
if (decay_time <= 0) {
|
||||
/* Purging is eagerly done or disabled currently. */
|
||||
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
|
||||
assert(decay_interval_ns > 0);
|
||||
size_t npages = extents_npages_get(extents);
|
||||
if (npages == 0) {
|
||||
unsigned i;
|
||||
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
|
||||
if (decay->backlog[i] > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i == SMOOTHSTEP_NSTEPS) {
|
||||
/* No dirty pages recorded. Sleep indefinitely. */
|
||||
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
|
||||
goto label_done;
|
||||
}
|
||||
}
|
||||
if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
|
||||
/* Use max interval. */
|
||||
interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
|
||||
size_t ub = SMOOTHSTEP_NSTEPS;
|
||||
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
|
||||
lb = (lb < 2) ? 2 : lb;
|
||||
if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
|
||||
(lb + 2 > ub)) {
|
||||
interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
assert(lb + 2 <= ub);
|
||||
size_t npurge_lb, npurge_ub;
|
||||
npurge_lb = decay_npurge_after_interval(decay, lb);
|
||||
if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
|
||||
interval = decay_interval_ns * lb;
|
||||
goto label_done;
|
||||
}
|
||||
npurge_ub = decay_npurge_after_interval(decay, ub);
|
||||
if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
|
||||
interval = decay_interval_ns * ub;
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
unsigned n_search = 0;
|
||||
size_t target, npurge;
|
||||
while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
|
||||
&& (lb + 2 < ub)) {
|
||||
target = (lb + ub) / 2;
|
||||
npurge = decay_npurge_after_interval(decay, target);
|
||||
if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
|
||||
ub = target;
|
||||
npurge_ub = npurge;
|
||||
} else {
|
||||
lb = target;
|
||||
npurge_lb = npurge;
|
||||
}
|
||||
assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
|
||||
}
|
||||
interval = decay_interval_ns * (ub + lb) / 2;
|
||||
label_done:
|
||||
interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
|
||||
BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
|
||||
malloc_mutex_unlock(tsdn, &decay->mtx);
|
||||
|
||||
return interval;
|
||||
}
|
||||
|
||||
/* Compute purge interval for background threads. */
|
||||
static uint64_t
|
||||
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
|
||||
uint64_t i1, i2;
|
||||
i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
|
||||
&arena->extents_dirty);
|
||||
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
|
||||
return i1;
|
||||
}
|
||||
i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
|
||||
&arena->extents_muzzy);
|
||||
|
||||
return i1 < i2 ? i1 : i2;
|
||||
}
|
||||
|
||||
static void
|
||||
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
|
||||
uint64_t interval) {
|
||||
if (config_stats) {
|
||||
info->tot_n_runs++;
|
||||
}
|
||||
info->npages_to_purge_new = 0;
|
||||
|
||||
struct timeval tv;
|
||||
/* Specific clock required by timedwait. */
|
||||
gettimeofday(&tv, NULL);
|
||||
nstime_t before_sleep;
|
||||
nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
|
||||
|
||||
int ret;
|
||||
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
|
||||
assert(background_thread_indefinite_sleep(info));
|
||||
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
|
||||
assert(ret == 0);
|
||||
} else {
|
||||
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
|
||||
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
|
||||
/* We need malloc clock (can be different from tv). */
|
||||
nstime_t next_wakeup;
|
||||
nstime_init(&next_wakeup, 0);
|
||||
nstime_update(&next_wakeup);
|
||||
nstime_iadd(&next_wakeup, interval);
|
||||
assert(nstime_ns(&next_wakeup) <
|
||||
BACKGROUND_THREAD_INDEFINITE_SLEEP);
|
||||
background_thread_wakeup_time_set(tsdn, info,
|
||||
nstime_ns(&next_wakeup));
|
||||
|
||||
nstime_t ts_wakeup;
|
||||
nstime_copy(&ts_wakeup, &before_sleep);
|
||||
nstime_iadd(&ts_wakeup, interval);
|
||||
struct timespec ts;
|
||||
ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
|
||||
ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
|
||||
|
||||
assert(!background_thread_indefinite_sleep(info));
|
||||
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
|
||||
assert(ret == ETIMEDOUT || ret == 0);
|
||||
background_thread_wakeup_time_set(tsdn, info,
|
||||
BACKGROUND_THREAD_INDEFINITE_SLEEP);
|
||||
}
|
||||
if (config_stats) {
|
||||
gettimeofday(&tv, NULL);
|
||||
nstime_t after_sleep;
|
||||
nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
|
||||
if (nstime_compare(&after_sleep, &before_sleep) > 0) {
|
||||
nstime_subtract(&after_sleep, &before_sleep);
|
||||
nstime_add(&info->tot_sleep_time, &after_sleep);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
|
||||
if (unlikely(info->state == background_thread_paused)) {
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
/* Wait on global lock to update status. */
|
||||
malloc_mutex_lock(tsdn, &background_thread_lock);
|
||||
malloc_mutex_unlock(tsdn, &background_thread_lock);
|
||||
malloc_mutex_lock(tsdn, &info->mtx);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
|
||||
uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
|
||||
unsigned narenas = narenas_total_get();
|
||||
|
||||
for (unsigned i = ind; i < narenas; i += max_background_threads) {
|
||||
arena_t *arena = arena_get(tsdn, i, false);
|
||||
if (!arena) {
|
||||
continue;
|
||||
}
|
||||
arena_decay(tsdn, arena, true, false);
|
||||
if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
|
||||
/* Min interval will be used. */
|
||||
continue;
|
||||
}
|
||||
uint64_t interval = arena_decay_compute_purge_interval(tsdn,
|
||||
arena);
|
||||
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
|
||||
if (min_interval > interval) {
|
||||
min_interval = interval;
|
||||
}
|
||||
}
|
||||
background_thread_sleep(tsdn, info, min_interval);
|
||||
}
|
||||
|
||||
static bool
|
||||
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
|
||||
if (info == &background_thread_info[0]) {
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd),
|
||||
&background_thread_lock);
|
||||
} else {
|
||||
malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
|
||||
&background_thread_lock);
|
||||
}
|
||||
|
||||
pre_reentrancy(tsd, NULL);
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
bool has_thread;
|
||||
assert(info->state != background_thread_paused);
|
||||
if (info->state == background_thread_started) {
|
||||
has_thread = true;
|
||||
info->state = background_thread_stopped;
|
||||
pthread_cond_signal(&info->cond);
|
||||
} else {
|
||||
has_thread = false;
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
|
||||
if (!has_thread) {
|
||||
post_reentrancy(tsd);
|
||||
return false;
|
||||
}
|
||||
void *ret;
|
||||
if (pthread_join(info->thread, &ret)) {
|
||||
post_reentrancy(tsd);
|
||||
return true;
|
||||
}
|
||||
assert(ret == NULL);
|
||||
n_background_threads--;
|
||||
post_reentrancy(tsd);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void *background_thread_entry(void *ind_arg);
|
||||
|
||||
static int
|
||||
background_thread_create_signals_masked(pthread_t *thread,
|
||||
const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
|
||||
/*
|
||||
* Mask signals during thread creation so that the thread inherits
|
||||
* an empty signal set.
|
||||
*/
|
||||
sigset_t set;
|
||||
sigfillset(&set);
|
||||
sigset_t oldset;
|
||||
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
|
||||
if (mask_err != 0) {
|
||||
return mask_err;
|
||||
}
|
||||
int create_err = pthread_create_wrapper(thread, attr, start_routine,
|
||||
arg);
|
||||
/*
|
||||
* Restore the signal mask. Failure to restore the signal mask here
|
||||
* changes program behavior.
|
||||
*/
|
||||
int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
|
||||
if (restore_err != 0) {
|
||||
malloc_printf("<jemalloc>: background thread creation "
|
||||
"failed (%d), and signal mask restoration failed "
|
||||
"(%d)\n", create_err, restore_err);
|
||||
if (opt_abort) {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
return create_err;
|
||||
}
|
||||
|
||||
static bool
|
||||
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
|
||||
bool *created_threads) {
|
||||
bool ret = false;
|
||||
if (likely(*n_created == n_background_threads)) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
tsdn_t *tsdn = tsd_tsdn(tsd);
|
||||
malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
|
||||
for (unsigned i = 1; i < max_background_threads; i++) {
|
||||
if (created_threads[i]) {
|
||||
continue;
|
||||
}
|
||||
background_thread_info_t *info = &background_thread_info[i];
|
||||
malloc_mutex_lock(tsdn, &info->mtx);
|
||||
/*
|
||||
* In case of the background_thread_paused state because of
|
||||
* arena reset, delay the creation.
|
||||
*/
|
||||
bool create = (info->state == background_thread_started);
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
if (!create) {
|
||||
continue;
|
||||
}
|
||||
|
||||
pre_reentrancy(tsd, NULL);
|
||||
int err = background_thread_create_signals_masked(&info->thread,
|
||||
NULL, background_thread_entry, (void *)(uintptr_t)i);
|
||||
post_reentrancy(tsd);
|
||||
|
||||
if (err == 0) {
|
||||
(*n_created)++;
|
||||
created_threads[i] = true;
|
||||
} else {
|
||||
malloc_printf("<jemalloc>: background thread "
|
||||
"creation failed (%d)\n", err);
|
||||
if (opt_abort) {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
/* Return to restart the loop since we unlocked. */
|
||||
ret = true;
|
||||
break;
|
||||
}
|
||||
malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
background_thread0_work(tsd_t *tsd) {
|
||||
/* Thread0 is also responsible for launching / terminating threads. */
|
||||
VARIABLE_ARRAY(bool, created_threads, max_background_threads);
|
||||
unsigned i;
|
||||
for (i = 1; i < max_background_threads; i++) {
|
||||
created_threads[i] = false;
|
||||
}
|
||||
/* Start working, and create more threads when asked. */
|
||||
unsigned n_created = 1;
|
||||
while (background_thread_info[0].state != background_thread_stopped) {
|
||||
if (background_thread_pause_check(tsd_tsdn(tsd),
|
||||
&background_thread_info[0])) {
|
||||
continue;
|
||||
}
|
||||
if (check_background_thread_creation(tsd, &n_created,
|
||||
(bool *)&created_threads)) {
|
||||
continue;
|
||||
}
|
||||
background_work_sleep_once(tsd_tsdn(tsd),
|
||||
&background_thread_info[0], 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Shut down other threads at exit. Note that the ctl thread is holding
|
||||
* the global background_thread mutex (and is waiting) for us.
|
||||
*/
|
||||
assert(!background_thread_enabled());
|
||||
for (i = 1; i < max_background_threads; i++) {
|
||||
background_thread_info_t *info = &background_thread_info[i];
|
||||
assert(info->state != background_thread_paused);
|
||||
if (created_threads[i]) {
|
||||
background_threads_disable_single(tsd, info);
|
||||
} else {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
if (info->state != background_thread_stopped) {
|
||||
/* The thread was not created. */
|
||||
assert(info->state ==
|
||||
background_thread_started);
|
||||
n_background_threads--;
|
||||
info->state = background_thread_stopped;
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
}
|
||||
}
|
||||
background_thread_info[0].state = background_thread_stopped;
|
||||
assert(n_background_threads == 1);
|
||||
}
|
||||
|
||||
static void
|
||||
background_work(tsd_t *tsd, unsigned ind) {
|
||||
background_thread_info_t *info = &background_thread_info[ind];
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
|
||||
BACKGROUND_THREAD_INDEFINITE_SLEEP);
|
||||
if (ind == 0) {
|
||||
background_thread0_work(tsd);
|
||||
} else {
|
||||
while (info->state != background_thread_stopped) {
|
||||
if (background_thread_pause_check(tsd_tsdn(tsd),
|
||||
info)) {
|
||||
continue;
|
||||
}
|
||||
background_work_sleep_once(tsd_tsdn(tsd), info, ind);
|
||||
}
|
||||
}
|
||||
assert(info->state == background_thread_stopped);
|
||||
background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
}
|
||||
|
||||
static void *
|
||||
background_thread_entry(void *ind_arg) {
|
||||
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
|
||||
assert(thread_ind < max_background_threads);
|
||||
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
|
||||
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
|
||||
#elif defined(__FreeBSD__)
|
||||
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
|
||||
#endif
|
||||
if (opt_percpu_arena != percpu_arena_disabled) {
|
||||
set_current_thread_affinity((int)thread_ind);
|
||||
}
|
||||
/*
|
||||
* Start periodic background work. We use internal tsd which avoids
|
||||
* side effects, for example triggering new arena creation (which in
|
||||
* turn triggers another background thread creation).
|
||||
*/
|
||||
background_work(tsd_internal_fetch(), thread_ind);
|
||||
assert(pthread_equal(pthread_self(),
|
||||
background_thread_info[thread_ind].thread));
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
|
||||
info->state = background_thread_started;
|
||||
background_thread_info_init(tsd_tsdn(tsd), info);
|
||||
n_background_threads++;
|
||||
}
|
||||
|
||||
static bool
|
||||
background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
|
||||
assert(have_background_thread);
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
|
||||
|
||||
/* We create at most NCPUs threads. */
|
||||
size_t thread_ind = arena_ind % max_background_threads;
|
||||
background_thread_info_t *info = &background_thread_info[thread_ind];
|
||||
|
||||
bool need_new_thread;
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
need_new_thread = background_thread_enabled() &&
|
||||
(info->state == background_thread_stopped);
|
||||
if (need_new_thread) {
|
||||
background_thread_init(tsd, info);
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
if (!need_new_thread) {
|
||||
return false;
|
||||
}
|
||||
if (arena_ind != 0) {
|
||||
/* Threads are created asynchronously by Thread 0. */
|
||||
background_thread_info_t *t0 = &background_thread_info[0];
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
|
||||
assert(t0->state == background_thread_started);
|
||||
pthread_cond_signal(&t0->cond);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
pre_reentrancy(tsd, NULL);
|
||||
/*
|
||||
* To avoid complications (besides reentrancy), create internal
|
||||
* background threads with the underlying pthread_create.
|
||||
*/
|
||||
int err = background_thread_create_signals_masked(&info->thread, NULL,
|
||||
background_thread_entry, (void *)thread_ind);
|
||||
post_reentrancy(tsd);
|
||||
|
||||
if (err != 0) {
|
||||
malloc_printf("<jemalloc>: arena 0 background thread creation "
|
||||
"failed (%d)\n", err);
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
info->state = background_thread_stopped;
|
||||
n_background_threads--;
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Create a new background thread if needed. */
|
||||
bool
|
||||
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
|
||||
assert(have_background_thread);
|
||||
|
||||
bool ret;
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
|
||||
ret = background_thread_create_locked(tsd, arena_ind);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
background_threads_enable(tsd_t *tsd) {
|
||||
assert(n_background_threads == 0);
|
||||
assert(background_thread_enabled());
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
|
||||
|
||||
VARIABLE_ARRAY(bool, marked, max_background_threads);
|
||||
unsigned i, nmarked;
|
||||
for (i = 0; i < max_background_threads; i++) {
|
||||
marked[i] = false;
|
||||
}
|
||||
nmarked = 0;
|
||||
/* Thread 0 is required and created at the end. */
|
||||
marked[0] = true;
|
||||
/* Mark the threads we need to create for thread 0. */
|
||||
unsigned n = narenas_total_get();
|
||||
for (i = 1; i < n; i++) {
|
||||
if (marked[i % max_background_threads] ||
|
||||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
|
||||
continue;
|
||||
}
|
||||
background_thread_info_t *info = &background_thread_info[
|
||||
i % max_background_threads];
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
|
||||
assert(info->state == background_thread_stopped);
|
||||
background_thread_init(tsd, info);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
|
||||
marked[i % max_background_threads] = true;
|
||||
if (++nmarked == max_background_threads) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return background_thread_create_locked(tsd, 0);
|
||||
}
|
||||
|
||||
bool
|
||||
background_threads_disable(tsd_t *tsd) {
|
||||
assert(!background_thread_enabled());
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
|
||||
|
||||
/* Thread 0 will be responsible for terminating other threads. */
|
||||
if (background_threads_disable_single(tsd,
|
||||
&background_thread_info[0])) {
|
||||
return true;
|
||||
}
|
||||
assert(n_background_threads == 0);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Check if we need to signal the background thread early. */
|
||||
void
|
||||
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
|
||||
arena_decay_t *decay, size_t npages_new) {
|
||||
background_thread_info_t *info = arena_background_thread_info_get(
|
||||
arena);
|
||||
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
|
||||
/*
|
||||
* Background thread may hold the mutex for a long period of
|
||||
* time. We'd like to avoid the variance on application
|
||||
* threads. So keep this non-blocking, and leave the work to a
|
||||
* future epoch.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
if (info->state != background_thread_started) {
|
||||
goto label_done;
|
||||
}
|
||||
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
|
||||
if (decay_time <= 0) {
|
||||
/* Purging is eagerly done or disabled currently. */
|
||||
goto label_done_unlock2;
|
||||
}
|
||||
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
|
||||
assert(decay_interval_ns > 0);
|
||||
|
||||
nstime_t diff;
|
||||
nstime_init(&diff, background_thread_wakeup_time_get(info));
|
||||
if (nstime_compare(&diff, &decay->epoch) <= 0) {
|
||||
goto label_done_unlock2;
|
||||
}
|
||||
nstime_subtract(&diff, &decay->epoch);
|
||||
if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
|
||||
goto label_done_unlock2;
|
||||
}
|
||||
|
||||
if (npages_new > 0) {
|
||||
size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
|
||||
/*
|
||||
* Compute how many new pages we would need to purge by the next
|
||||
* wakeup, which is used to determine if we should signal the
|
||||
* background thread.
|
||||
*/
|
||||
uint64_t npurge_new;
|
||||
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
|
||||
npurge_new = npages_new;
|
||||
} else {
|
||||
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
|
||||
assert(h_steps_max >=
|
||||
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
|
||||
npurge_new = npages_new * (h_steps_max -
|
||||
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
|
||||
npurge_new >>= SMOOTHSTEP_BFP;
|
||||
}
|
||||
info->npages_to_purge_new += npurge_new;
|
||||
}
|
||||
|
||||
bool should_signal;
|
||||
if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
|
||||
should_signal = true;
|
||||
} else if (unlikely(background_thread_indefinite_sleep(info)) &&
|
||||
(extents_npages_get(&arena->extents_dirty) > 0 ||
|
||||
extents_npages_get(&arena->extents_muzzy) > 0 ||
|
||||
info->npages_to_purge_new > 0)) {
|
||||
should_signal = true;
|
||||
} else {
|
||||
should_signal = false;
|
||||
}
|
||||
|
||||
if (should_signal) {
|
||||
info->npages_to_purge_new = 0;
|
||||
pthread_cond_signal(&info->cond);
|
||||
}
|
||||
label_done_unlock2:
|
||||
malloc_mutex_unlock(tsdn, &decay->mtx);
|
||||
label_done:
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
background_thread_prefork0(tsdn_t *tsdn) {
|
||||
malloc_mutex_prefork(tsdn, &background_thread_lock);
|
||||
background_thread_enabled_at_fork = background_thread_enabled();
|
||||
}
|
||||
|
||||
void
|
||||
background_thread_prefork1(tsdn_t *tsdn) {
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
background_thread_postfork_parent(tsdn_t *tsdn) {
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
malloc_mutex_postfork_parent(tsdn,
|
||||
&background_thread_info[i].mtx);
|
||||
}
|
||||
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
|
||||
}
|
||||
|
||||
void
|
||||
background_thread_postfork_child(tsdn_t *tsdn) {
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
malloc_mutex_postfork_child(tsdn,
|
||||
&background_thread_info[i].mtx);
|
||||
}
|
||||
malloc_mutex_postfork_child(tsdn, &background_thread_lock);
|
||||
if (!background_thread_enabled_at_fork) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Clear background_thread state (reset to disabled for child). */
|
||||
malloc_mutex_lock(tsdn, &background_thread_lock);
|
||||
n_background_threads = 0;
|
||||
background_thread_enabled_set(tsdn, false);
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
background_thread_info_t *info = &background_thread_info[i];
|
||||
malloc_mutex_lock(tsdn, &info->mtx);
|
||||
info->state = background_thread_stopped;
|
||||
int ret = pthread_cond_init(&info->cond, NULL);
|
||||
assert(ret == 0);
|
||||
background_thread_info_init(tsdn, info);
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &background_thread_lock);
|
||||
}
|
||||
|
||||
bool
|
||||
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
|
||||
assert(config_stats);
|
||||
malloc_mutex_lock(tsdn, &background_thread_lock);
|
||||
if (!background_thread_enabled()) {
|
||||
malloc_mutex_unlock(tsdn, &background_thread_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
stats->num_threads = n_background_threads;
|
||||
uint64_t num_runs = 0;
|
||||
nstime_init(&stats->run_interval, 0);
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
background_thread_info_t *info = &background_thread_info[i];
|
||||
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
|
||||
/*
|
||||
* Each background thread run may take a long time;
|
||||
* avoid waiting on the stats if the thread is active.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
if (info->state != background_thread_stopped) {
|
||||
num_runs += info->tot_n_runs;
|
||||
nstime_add(&stats->run_interval, &info->tot_sleep_time);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
}
|
||||
stats->num_runs = num_runs;
|
||||
if (num_runs > 0) {
|
||||
nstime_idivide(&stats->run_interval, num_runs);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &background_thread_lock);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
|
||||
#undef BILLION
|
||||
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
|
||||
|
||||
#ifdef JEMALLOC_HAVE_DLSYM
|
||||
#include <dlfcn.h>
|
||||
#endif
|
||||
|
||||
static bool
|
||||
pthread_create_fptr_init(void) {
|
||||
if (pthread_create_fptr != NULL) {
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* Try the next symbol first, because 1) when use lazy_lock we have a
|
||||
* wrapper for pthread_create; and 2) application may define its own
|
||||
* wrapper as well (and can call malloc within the wrapper).
|
||||
*/
|
||||
#ifdef JEMALLOC_HAVE_DLSYM
|
||||
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
|
||||
#else
|
||||
pthread_create_fptr = NULL;
|
||||
#endif
|
||||
if (pthread_create_fptr == NULL) {
|
||||
if (config_lazy_lock) {
|
||||
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
|
||||
"\"pthread_create\")\n");
|
||||
abort();
|
||||
} else {
|
||||
/* Fall back to the default symbol. */
|
||||
pthread_create_fptr = pthread_create;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* When lazy lock is enabled, we need to make sure setting isthreaded before
|
||||
* taking any background_thread locks. This is called early in ctl (instead of
|
||||
* wait for the pthread_create calls to trigger) because the mutex is required
|
||||
* before creating background threads.
|
||||
*/
|
||||
void
|
||||
background_thread_ctl_init(tsdn_t *tsdn) {
|
||||
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
|
||||
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
|
||||
pthread_create_fptr_init();
|
||||
pthread_create_wrapper_init();
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
|
||||
|
||||
bool
|
||||
background_thread_boot0(void) {
|
||||
if (!have_background_thread && opt_background_thread) {
|
||||
malloc_printf("<jemalloc>: option background_thread currently "
|
||||
"supports pthread only\n");
|
||||
return true;
|
||||
}
|
||||
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
|
||||
if ((config_lazy_lock || opt_background_thread) &&
|
||||
pthread_create_fptr_init()) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
background_thread_boot1(tsdn_t *tsdn) {
|
||||
#ifdef JEMALLOC_BACKGROUND_THREAD
|
||||
assert(have_background_thread);
|
||||
assert(narenas_total_get() > 0);
|
||||
|
||||
if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
|
||||
opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
|
||||
}
|
||||
max_background_threads = opt_max_background_threads;
|
||||
|
||||
background_thread_enabled_set(tsdn, opt_background_thread);
|
||||
if (malloc_mutex_init(&background_thread_lock,
|
||||
"background_thread_global",
|
||||
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
|
||||
b0get(), opt_max_background_threads *
|
||||
sizeof(background_thread_info_t), CACHELINE);
|
||||
if (background_thread_info == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < max_background_threads; i++) {
|
||||
background_thread_info_t *info = &background_thread_info[i];
|
||||
/* Thread mutex is rank_inclusive because of thread0. */
|
||||
if (malloc_mutex_init(&info->mtx, "background_thread",
|
||||
WITNESS_RANK_BACKGROUND_THREAD,
|
||||
malloc_mutex_address_ordered)) {
|
||||
return true;
|
||||
}
|
||||
if (pthread_cond_init(&info->cond, NULL)) {
|
||||
return true;
|
||||
}
|
||||
malloc_mutex_lock(tsdn, &info->mtx);
|
||||
info->state = background_thread_stopped;
|
||||
background_thread_info_init(tsdn, info);
|
||||
malloc_mutex_unlock(tsdn, &info->mtx);
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
514
deps/jemalloc/src/base.c
vendored
Normal file
@@ -0,0 +1,514 @@
|
||||
#define JEMALLOC_BASE_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/extent_mmap.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/sz.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
static base_t *b0;
|
||||
|
||||
metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
|
||||
|
||||
const char *metadata_thp_mode_names[] = {
|
||||
"disabled",
|
||||
"auto",
|
||||
"always"
|
||||
};
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static inline bool
|
||||
metadata_thp_madvise(void) {
|
||||
return (metadata_thp_enabled() &&
|
||||
(init_system_thp_mode == thp_mode_default));
|
||||
}
|
||||
|
||||
static void *
|
||||
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
|
||||
void *addr;
|
||||
bool zero = true;
|
||||
bool commit = true;
|
||||
|
||||
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
|
||||
assert(size == HUGEPAGE_CEILING(size));
|
||||
size_t alignment = HUGEPAGE;
|
||||
if (extent_hooks == &extent_hooks_default) {
|
||||
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
|
||||
} else {
|
||||
/* No arena context as we are creating new arenas. */
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
pre_reentrancy(tsd, NULL);
|
||||
addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
|
||||
&zero, &commit, ind);
|
||||
post_reentrancy(tsd);
|
||||
}
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static void
|
||||
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
|
||||
size_t size) {
|
||||
/*
|
||||
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
|
||||
* stopping at first success. This cascade is performed for consistency
|
||||
* with the cascade in extent_dalloc_wrapper() because an application's
|
||||
* custom hooks may not support e.g. dalloc. This function is only ever
|
||||
* called as a side effect of arena destruction, so although it might
|
||||
* seem pointless to do anything besides dalloc here, the application
|
||||
* may in fact want the end state of all associated virtual memory to be
|
||||
* in some consistent-but-allocated state.
|
||||
*/
|
||||
if (extent_hooks == &extent_hooks_default) {
|
||||
if (!extent_dalloc_mmap(addr, size)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (!pages_decommit(addr, size)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (!pages_purge_forced(addr, size)) {
|
||||
goto label_done;
|
||||
}
|
||||
if (!pages_purge_lazy(addr, size)) {
|
||||
goto label_done;
|
||||
}
|
||||
/* Nothing worked. This should never happen. */
|
||||
not_reached();
|
||||
} else {
|
||||
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
|
||||
pre_reentrancy(tsd, NULL);
|
||||
if (extent_hooks->dalloc != NULL &&
|
||||
!extent_hooks->dalloc(extent_hooks, addr, size, true,
|
||||
ind)) {
|
||||
goto label_post_reentrancy;
|
||||
}
|
||||
if (extent_hooks->decommit != NULL &&
|
||||
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
|
||||
ind)) {
|
||||
goto label_post_reentrancy;
|
||||
}
|
||||
if (extent_hooks->purge_forced != NULL &&
|
||||
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
|
||||
size, ind)) {
|
||||
goto label_post_reentrancy;
|
||||
}
|
||||
if (extent_hooks->purge_lazy != NULL &&
|
||||
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
|
||||
ind)) {
|
||||
goto label_post_reentrancy;
|
||||
}
|
||||
/* Nothing worked. That's the application's problem. */
|
||||
label_post_reentrancy:
|
||||
post_reentrancy(tsd);
|
||||
}
|
||||
label_done:
|
||||
if (metadata_thp_madvise()) {
|
||||
/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
|
||||
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
|
||||
(size & HUGEPAGE_MASK) == 0);
|
||||
pages_nohuge(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
|
||||
size_t size) {
|
||||
size_t sn;
|
||||
|
||||
sn = *extent_sn_next;
|
||||
(*extent_sn_next)++;
|
||||
|
||||
extent_binit(extent, addr, size, sn);
|
||||
}
|
||||
|
||||
static size_t
|
||||
base_get_num_blocks(base_t *base, bool with_new_block) {
|
||||
base_block_t *b = base->blocks;
|
||||
assert(b != NULL);
|
||||
|
||||
size_t n_blocks = with_new_block ? 2 : 1;
|
||||
while (b->next != NULL) {
|
||||
n_blocks++;
|
||||
b = b->next;
|
||||
}
|
||||
|
||||
return n_blocks;
|
||||
}
|
||||
|
||||
static void
|
||||
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
|
||||
assert(opt_metadata_thp == metadata_thp_auto);
|
||||
malloc_mutex_assert_owner(tsdn, &base->mtx);
|
||||
if (base->auto_thp_switched) {
|
||||
return;
|
||||
}
|
||||
/* Called when adding a new block. */
|
||||
bool should_switch;
|
||||
if (base_ind_get(base) != 0) {
|
||||
should_switch = (base_get_num_blocks(base, true) ==
|
||||
BASE_AUTO_THP_THRESHOLD);
|
||||
} else {
|
||||
should_switch = (base_get_num_blocks(base, true) ==
|
||||
BASE_AUTO_THP_THRESHOLD_A0);
|
||||
}
|
||||
if (!should_switch) {
|
||||
return;
|
||||
}
|
||||
|
||||
base->auto_thp_switched = true;
|
||||
assert(!config_stats || base->n_thp == 0);
|
||||
/* Make the initial blocks THP lazily. */
|
||||
base_block_t *block = base->blocks;
|
||||
while (block != NULL) {
|
||||
assert((block->size & HUGEPAGE_MASK) == 0);
|
||||
pages_huge(block, block->size);
|
||||
if (config_stats) {
|
||||
base->n_thp += HUGEPAGE_CEILING(block->size -
|
||||
extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
|
||||
}
|
||||
block = block->next;
|
||||
assert(block == NULL || (base_ind_get(base) == 0));
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
|
||||
size_t alignment) {
|
||||
void *ret;
|
||||
|
||||
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
|
||||
assert(size == ALIGNMENT_CEILING(size, alignment));
|
||||
|
||||
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
|
||||
alignment) - (uintptr_t)extent_addr_get(extent);
|
||||
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
|
||||
assert(extent_bsize_get(extent) >= *gap_size + size);
|
||||
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
|
||||
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
|
||||
extent_sn_get(extent));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
|
||||
void *addr, size_t size) {
|
||||
if (extent_bsize_get(extent) > 0) {
|
||||
/*
|
||||
* Compute the index for the largest size class that does not
|
||||
* exceed extent's size.
|
||||
*/
|
||||
szind_t index_floor =
|
||||
sz_size2index(extent_bsize_get(extent) + 1) - 1;
|
||||
extent_heap_insert(&base->avail[index_floor], extent);
|
||||
}
|
||||
|
||||
if (config_stats) {
|
||||
base->allocated += size;
|
||||
/*
|
||||
* Add one PAGE to base_resident for every page boundary that is
|
||||
* crossed by the new allocation. Adjust n_thp similarly when
|
||||
* metadata_thp is enabled.
|
||||
*/
|
||||
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
|
||||
PAGE_CEILING((uintptr_t)addr - gap_size);
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
if (metadata_thp_madvise() && (opt_metadata_thp ==
|
||||
metadata_thp_always || base->auto_thp_switched)) {
|
||||
base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
|
||||
- HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
|
||||
LG_HUGEPAGE;
|
||||
assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
|
||||
size_t alignment) {
|
||||
void *ret;
|
||||
size_t gap_size;
|
||||
|
||||
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
|
||||
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a block of virtual memory that is large enough to start with a
|
||||
* base_block_t header, followed by an object of specified size and alignment.
|
||||
* On success a pointer to the initialized base_block_t header is returned.
|
||||
*/
|
||||
static base_block_t *
|
||||
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
|
||||
unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
|
||||
size_t alignment) {
|
||||
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
|
||||
size_t usize = ALIGNMENT_CEILING(size, alignment);
|
||||
size_t header_size = sizeof(base_block_t);
|
||||
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
|
||||
header_size;
|
||||
/*
|
||||
* Create increasingly larger blocks in order to limit the total number
|
||||
* of disjoint virtual memory ranges. Choose the next size in the page
|
||||
* size class series (skipping size classes that are not a multiple of
|
||||
* HUGEPAGE), or a size large enough to satisfy the requested size and
|
||||
* alignment, whichever is larger.
|
||||
*/
|
||||
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
|
||||
+ usize));
|
||||
pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
|
||||
*pind_last + 1 : *pind_last;
|
||||
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
|
||||
size_t block_size = (min_block_size > next_block_size) ? min_block_size
|
||||
: next_block_size;
|
||||
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
|
||||
block_size);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (metadata_thp_madvise()) {
|
||||
void *addr = (void *)block;
|
||||
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
|
||||
(block_size & HUGEPAGE_MASK) == 0);
|
||||
if (opt_metadata_thp == metadata_thp_always) {
|
||||
pages_huge(addr, block_size);
|
||||
} else if (opt_metadata_thp == metadata_thp_auto &&
|
||||
base != NULL) {
|
||||
/* base != NULL indicates this is not a new base. */
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
base_auto_thp_switch(tsdn, base);
|
||||
if (base->auto_thp_switched) {
|
||||
pages_huge(addr, block_size);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
}
|
||||
}
|
||||
|
||||
*pind_last = sz_psz2ind(block_size);
|
||||
block->size = block_size;
|
||||
block->next = NULL;
|
||||
assert(block_size >= header_size);
|
||||
base_extent_init(extent_sn_next, &block->extent,
|
||||
(void *)((uintptr_t)block + header_size), block_size - header_size);
|
||||
return block;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate an extent that is at least as large as specified size, with
|
||||
* specified alignment.
|
||||
*/
|
||||
static extent_t *
|
||||
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
|
||||
malloc_mutex_assert_owner(tsdn, &base->mtx);
|
||||
|
||||
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
|
||||
/*
|
||||
* Drop mutex during base_block_alloc(), because an extent hook will be
|
||||
* called.
|
||||
*/
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
|
||||
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
|
||||
alignment);
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
block->next = base->blocks;
|
||||
base->blocks = block;
|
||||
if (config_stats) {
|
||||
base->allocated += sizeof(base_block_t);
|
||||
base->resident += PAGE_CEILING(sizeof(base_block_t));
|
||||
base->mapped += block->size;
|
||||
if (metadata_thp_madvise() &&
|
||||
!(opt_metadata_thp == metadata_thp_auto
|
||||
&& !base->auto_thp_switched)) {
|
||||
assert(base->n_thp > 0);
|
||||
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
|
||||
LG_HUGEPAGE;
|
||||
}
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
|
||||
}
|
||||
return &block->extent;
|
||||
}
|
||||
|
||||
base_t *
|
||||
b0get(void) {
|
||||
return b0;
|
||||
}
|
||||
|
||||
base_t *
|
||||
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
|
||||
pszind_t pind_last = 0;
|
||||
size_t extent_sn_next = 0;
|
||||
base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
|
||||
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
|
||||
if (block == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
size_t gap_size;
|
||||
size_t base_alignment = CACHELINE;
|
||||
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
|
||||
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
|
||||
&gap_size, base_size, base_alignment);
|
||||
base->ind = ind;
|
||||
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
|
||||
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
base_unmap(tsdn, extent_hooks, ind, block, block->size);
|
||||
return NULL;
|
||||
}
|
||||
base->pind_last = pind_last;
|
||||
base->extent_sn_next = extent_sn_next;
|
||||
base->blocks = block;
|
||||
base->auto_thp_switched = false;
|
||||
for (szind_t i = 0; i < SC_NSIZES; i++) {
|
||||
extent_heap_new(&base->avail[i]);
|
||||
}
|
||||
if (config_stats) {
|
||||
base->allocated = sizeof(base_block_t);
|
||||
base->resident = PAGE_CEILING(sizeof(base_block_t));
|
||||
base->mapped = block->size;
|
||||
base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
|
||||
metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
|
||||
>> LG_HUGEPAGE : 0;
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
|
||||
}
|
||||
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
|
||||
base_size);
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
void
|
||||
base_delete(tsdn_t *tsdn, base_t *base) {
|
||||
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
|
||||
base_block_t *next = base->blocks;
|
||||
do {
|
||||
base_block_t *block = next;
|
||||
next = block->next;
|
||||
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
|
||||
block->size);
|
||||
} while (next != NULL);
|
||||
}
|
||||
|
||||
extent_hooks_t *
|
||||
base_extent_hooks_get(base_t *base) {
|
||||
return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
extent_hooks_t *
|
||||
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
|
||||
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
|
||||
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
|
||||
return old_extent_hooks;
|
||||
}
|
||||
|
||||
static void *
|
||||
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
|
||||
size_t *esn) {
|
||||
alignment = QUANTUM_CEILING(alignment);
|
||||
size_t usize = ALIGNMENT_CEILING(size, alignment);
|
||||
size_t asize = usize + alignment - QUANTUM;
|
||||
|
||||
extent_t *extent = NULL;
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
|
||||
extent = extent_heap_remove_first(&base->avail[i]);
|
||||
if (extent != NULL) {
|
||||
/* Use existing space. */
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (extent == NULL) {
|
||||
/* Try to allocate more space. */
|
||||
extent = base_extent_alloc(tsdn, base, usize, alignment);
|
||||
}
|
||||
void *ret;
|
||||
if (extent == NULL) {
|
||||
ret = NULL;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
ret = base_extent_bump_alloc(base, extent, usize, alignment);
|
||||
if (esn != NULL) {
|
||||
*esn = extent_sn_get(extent);
|
||||
}
|
||||
label_return:
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
|
||||
* auto arenas, in order to make multi-page sparse data structures such as radix
|
||||
* tree nodes efficient with respect to physical memory usage. Upon success a
|
||||
* pointer to at least size bytes with specified alignment is returned. Note
|
||||
* that size is rounded up to the nearest multiple of alignment to avoid false
|
||||
* sharing.
|
||||
*/
|
||||
void *
|
||||
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
|
||||
return base_alloc_impl(tsdn, base, size, alignment, NULL);
|
||||
}
|
||||
|
||||
extent_t *
|
||||
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
|
||||
size_t esn;
|
||||
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
|
||||
CACHELINE, &esn);
|
||||
if (extent == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
extent_esn_set(extent, esn);
|
||||
return extent;
|
||||
}
|
||||
|
||||
void
|
||||
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
|
||||
size_t *mapped, size_t *n_thp) {
|
||||
cassert(config_stats);
|
||||
|
||||
malloc_mutex_lock(tsdn, &base->mtx);
|
||||
assert(base->allocated <= base->resident);
|
||||
assert(base->resident <= base->mapped);
|
||||
*allocated = base->allocated;
|
||||
*resident = base->resident;
|
||||
*mapped = base->mapped;
|
||||
*n_thp = base->n_thp;
|
||||
malloc_mutex_unlock(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_prefork(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_prefork(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_postfork_parent(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_child(tsdn_t *tsdn, base_t *base) {
|
||||
malloc_mutex_postfork_child(tsdn, &base->mtx);
|
||||
}
|
||||
|
||||
bool
|
||||
base_boot(tsdn_t *tsdn) {
|
||||
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
|
||||
return (b0 == NULL);
|
||||
}
|
||||
95
deps/jemalloc/src/bin.c
vendored
Normal file
@@ -0,0 +1,95 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"

bin_info_t bin_infos[SC_NBINS];

static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
    bin_info_t bin_infos[SC_NBINS]) {
    for (unsigned i = 0; i < SC_NBINS; i++) {
        bin_info_t *bin_info = &bin_infos[i];
        sc_t *sc = &sc_data->sc[i];
        bin_info->reg_size = ((size_t)1U << sc->lg_base)
            + ((size_t)sc->ndelta << sc->lg_delta);
        bin_info->slab_size = (sc->pgs << LG_PAGE);
        bin_info->nregs =
            (uint32_t)(bin_info->slab_size / bin_info->reg_size);
        bin_info->n_shards = bin_shard_sizes[i];
        bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
            bin_info->nregs);
        bin_info->bitmap_info = bitmap_info;
    }
}

bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
    size_t end_size, size_t nshards) {
    if (nshards > BIN_SHARDS_MAX || nshards == 0) {
        return true;
    }

    if (start_size > SC_SMALL_MAXCLASS) {
        return false;
    }
    if (end_size > SC_SMALL_MAXCLASS) {
        end_size = SC_SMALL_MAXCLASS;
    }

    /* Compute the index since this may happen before sz init. */
    szind_t ind1 = sz_size2index_compute(start_size);
    szind_t ind2 = sz_size2index_compute(end_size);
    for (unsigned i = ind1; i <= ind2; i++) {
        bin_shard_sizes[i] = (unsigned)nshards;
    }

    return false;
}

void
bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
    /* Load the default number of shards. */
    for (unsigned i = 0; i < SC_NBINS; i++) {
        bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
    }
}

void
bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
    assert(sc_data->initialized);
    bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}

bool
bin_init(bin_t *bin) {
    if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    bin->slabcur = NULL;
    extent_heap_new(&bin->slabs_nonfull);
    extent_list_init(&bin->slabs_full);
    if (config_stats) {
        memset(&bin->stats, 0, sizeof(bin_stats_t));
    }
    return false;
}

void
bin_prefork(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_prefork(tsdn, &bin->lock);
}

void
bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_postfork_parent(tsdn, &bin->lock);
}

void
bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_postfork_child(tsdn, &bin->lock);
}
121
deps/jemalloc/src/bitmap.c
vendored
Normal file
@@ -0,0 +1,121 @@
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/

#ifdef BITMAP_USE_TREE

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
	unsigned i;
	size_t group_count;

	assert(nbits > 0);
	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

	/*
	 * Compute the number of groups necessary to store nbits bits, and
	 * progressively work upward through the levels until reaching a level
	 * that requires only one group.
	 */
	binfo->levels[0].group_offset = 0;
	group_count = BITMAP_BITS2GROUPS(nbits);
	for (i = 1; group_count > 1; i++) {
		assert(i < BITMAP_MAX_LEVELS);
		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
		    + group_count;
		group_count = BITMAP_BITS2GROUPS(group_count);
	}
	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
	    + group_count;
	assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
	binfo->nlevels = i;
	binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
	return binfo->levels[binfo->nlevels].group_offset;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
	size_t extra;
	unsigned i;

	/*
	 * Bits are actually inverted with regard to the external bitmap
	 * interface.
	 */

	if (fill) {
		/* The "filled" bitmap starts out with all 0 bits. */
		memset(bitmap, 0, bitmap_size(binfo));
		return;
	}

	/*
	 * The "empty" bitmap starts out with all 1 bits, except for trailing
	 * unused bits (if any).  Note that each group uses bit 0 to correspond
	 * to the first logical bit in the group, so extra bits are the most
	 * significant bits of the last group.
	 */
	memset(bitmap, 0xffU, bitmap_size(binfo));
	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
	    & BITMAP_GROUP_NBITS_MASK;
	if (extra != 0) {
		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
	}
	for (i = 1; i < binfo->nlevels; i++) {
		size_t group_count = binfo->levels[i].group_offset -
		    binfo->levels[i-1].group_offset;
		extra = (BITMAP_GROUP_NBITS - (group_count &
		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
		if (extra != 0) {
			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
		}
	}
}

#else /* BITMAP_USE_TREE */

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
	assert(nbits > 0);
	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

	binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
	binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
	return binfo->ngroups;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
	size_t extra;

	if (fill) {
		memset(bitmap, 0, bitmap_size(binfo));
		return;
	}

	memset(bitmap, 0xffU, bitmap_size(binfo));
	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
	    & BITMAP_GROUP_NBITS_MASK;
	if (extra != 0) {
		bitmap[binfo->ngroups - 1] >>= extra;
	}
}

#endif /* BITMAP_USE_TREE */

size_t
bitmap_size(const bitmap_info_t *binfo) {
	return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
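As a sanity check on the non-tree initialization above, the short standalone program below reproduces the group count and the `extra`-bit shift for a sample size. It assumes 64-bit groups (i.e. BITMAP_GROUP_NBITS == 64, the usual 64-bit configuration) and uses a GCC/Clang popcount builtin; it is an illustration only, not part of jemalloc.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	/* Assumption: 64-bit bitmap groups. */
	const size_t group_nbits = 64, group_nbits_mask = group_nbits - 1;
	size_t nbits = 100;
	size_t ngroups = (nbits + group_nbits - 1) / group_nbits;	/* 2 */
	size_t extra = (group_nbits - (nbits & group_nbits_mask)) &
	    group_nbits_mask;						/* 28 */
	/* "Empty" init: all 1s, then clear the unused high bits of the last group. */
	uint64_t last_group = UINT64_MAX >> extra;
	size_t usable = (ngroups - 1) * group_nbits +
	    (size_t)__builtin_popcountll(last_group);
	assert(usable == nbits);	/* exactly nbits usable bits remain set */
	printf("ngroups=%zu extra=%zu\n", ngroups, extra);
	return 0;
}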
395
deps/jemalloc/src/chunk.c
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
#define JEMALLOC_CHUNK_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
const char *opt_dss = DSS_DEFAULT;
|
||||
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
||||
|
||||
malloc_mutex_t chunks_mtx;
|
||||
chunk_stats_t stats_chunks;
|
||||
|
||||
/*
|
||||
* Trees of chunks that were previously allocated (trees differ only in node
|
||||
* ordering). These are used when allocating chunks, in an attempt to re-use
|
||||
* address space. Depending on function, different tree orderings are needed,
|
||||
* which is why there are two trees with the same contents.
|
||||
*/
|
||||
static extent_tree_t chunks_szad_mmap;
|
||||
static extent_tree_t chunks_ad_mmap;
|
||||
static extent_tree_t chunks_szad_dss;
|
||||
static extent_tree_t chunks_ad_dss;
|
||||
|
||||
rtree_t *chunks_rtree;
|
||||
|
||||
/* Various chunk-related settings. */
|
||||
size_t chunksize;
|
||||
size_t chunksize_mask; /* (chunksize - 1). */
|
||||
size_t chunk_npages;
|
||||
size_t map_bias;
|
||||
size_t arena_maxclass; /* Max size class for arenas. */
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void *chunk_recycle(extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
|
||||
bool *zero);
|
||||
static void chunk_record(extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, void *chunk, size_t size);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
|
||||
size_t alignment, bool base, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
extent_node_t *node;
|
||||
extent_node_t key;
|
||||
size_t alloc_size, leadsize, trailsize;
|
||||
bool zeroed;
|
||||
|
||||
if (base) {
|
||||
/*
|
||||
* This function may need to call base_node_{,de}alloc(), but
|
||||
* the current chunk allocation request is on behalf of the
|
||||
* base allocator. Avoid deadlock (and if that weren't an
|
||||
* issue, potential for infinite recursion) by returning NULL.
|
||||
*/
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
alloc_size = size + alignment - chunksize;
|
||||
/* Beware size_t wrap-around. */
|
||||
if (alloc_size < size)
|
||||
return (NULL);
|
||||
key.addr = NULL;
|
||||
key.size = alloc_size;
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
node = extent_tree_szad_nsearch(chunks_szad, &key);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
|
||||
(uintptr_t)node->addr;
|
||||
assert(node->size >= leadsize + size);
|
||||
trailsize = node->size - leadsize - size;
|
||||
ret = (void *)((uintptr_t)node->addr + leadsize);
|
||||
zeroed = node->zeroed;
|
||||
if (zeroed)
|
||||
*zero = true;
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
if (leadsize != 0) {
|
||||
/* Insert the leading space as a smaller chunk. */
|
||||
node->size = leadsize;
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
node = NULL;
|
||||
}
|
||||
if (trailsize != 0) {
|
||||
/* Insert the trailing space as a smaller chunk. */
|
||||
if (node == NULL) {
|
||||
/*
|
||||
* An additional node is required, but
|
||||
* base_node_alloc() can cause a new base chunk to be
|
||||
* allocated. Drop chunks_mtx in order to avoid
|
||||
* deadlock, and if node allocation fails, deallocate
|
||||
* the result before returning an error.
|
||||
*/
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
node = base_node_alloc();
|
||||
if (node == NULL) {
|
||||
chunk_dealloc(ret, size, true);
|
||||
return (NULL);
|
||||
}
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
}
|
||||
node->addr = (void *)((uintptr_t)(ret) + size);
|
||||
node->size = trailsize;
|
||||
node->zeroed = zeroed;
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
node = NULL;
|
||||
}
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
||||
if (node != NULL)
|
||||
base_node_dealloc(node);
|
||||
if (*zero) {
|
||||
if (zeroed == false)
|
||||
memset(ret, 0, size);
|
||||
else if (config_debug) {
|
||||
size_t i;
|
||||
size_t *p = (size_t *)(uintptr_t)ret;
|
||||
|
||||
VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||
for (i = 0; i < size / sizeof(size_t); i++)
|
||||
assert(p[i] == 0);
|
||||
}
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the caller specifies (*zero == false), it is still possible to receive
|
||||
* zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
|
||||
* takes advantage of this to avoid demanding zeroed chunks, but taking
|
||||
* advantage of them if they are returned.
|
||||
*/
|
||||
void *
|
||||
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
/* "primary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_primary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
}
|
||||
/* mmap. */
|
||||
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
/* "secondary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_secondary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
/* All strategies for allocation failed. */
|
||||
ret = NULL;
|
||||
label_return:
|
||||
if (ret != NULL) {
|
||||
if (config_ivsalloc && base == false) {
|
||||
if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
|
||||
chunk_dealloc(ret, size, true);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
if (config_stats || config_prof) {
|
||||
bool gdump;
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
if (config_stats)
|
||||
stats_chunks.nchunks += (size / chunksize);
|
||||
stats_chunks.curchunks += (size / chunksize);
|
||||
if (stats_chunks.curchunks > stats_chunks.highchunks) {
|
||||
stats_chunks.highchunks =
|
||||
stats_chunks.curchunks;
|
||||
if (config_prof)
|
||||
gdump = true;
|
||||
} else if (config_prof)
|
||||
gdump = false;
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
if (config_prof && opt_prof && opt_prof_gdump && gdump)
|
||||
prof_gdump();
|
||||
}
|
||||
if (config_valgrind)
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
}
|
||||
assert(CHUNK_ADDR2BASE(ret) == ret);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
size_t size)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *xnode, *node, *prev, *xprev, key;
|
||||
|
||||
unzeroed = pages_purge(chunk, size);
|
||||
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
/*
|
||||
* Allocate a node before acquiring chunks_mtx even though it might not
|
||||
* be needed, because base_node_alloc() may cause a new base chunk to
|
||||
* be allocated, which could cause deadlock if chunks_mtx were already
|
||||
* held.
|
||||
*/
|
||||
xnode = base_node_alloc();
|
||||
/* Use xprev to implement conditional deferred deallocation of prev. */
|
||||
xprev = NULL;
|
||||
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
key.addr = (void *)((uintptr_t)chunk + size);
|
||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||
/* Try to coalesce forward. */
|
||||
if (node != NULL && node->addr == key.addr) {
|
||||
/*
|
||||
* Coalesce chunk with the following address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
* remove/insert from/into chunks_szad.
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = chunk;
|
||||
node->size += size;
|
||||
node->zeroed = (node->zeroed && (unzeroed == false));
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
} else {
|
||||
/* Coalescing forward failed, so insert a new node. */
|
||||
if (xnode == NULL) {
|
||||
/*
|
||||
* base_node_alloc() failed, which is an exceedingly
|
||||
* unlikely failure. Leak chunk; its pages have
|
||||
* already been purged, so this is only a virtual
|
||||
* memory leak.
|
||||
*/
|
||||
goto label_return;
|
||||
}
|
||||
node = xnode;
|
||||
xnode = NULL; /* Prevent deallocation below. */
|
||||
node->addr = chunk;
|
||||
node->size = size;
|
||||
node->zeroed = (unzeroed == false);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
}
|
||||
|
||||
/* Try to coalesce backward. */
|
||||
prev = extent_tree_ad_prev(chunks_ad, node);
|
||||
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
|
||||
chunk) {
|
||||
/*
|
||||
* Coalesce chunk with the previous address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
* remove/insert node from/into chunks_szad.
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, prev);
|
||||
extent_tree_ad_remove(chunks_ad, prev);
|
||||
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = prev->addr;
|
||||
node->size += prev->size;
|
||||
node->zeroed = (node->zeroed && prev->zeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
|
||||
xprev = prev;
|
||||
}
|
||||
|
||||
label_return:
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
/*
|
||||
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
|
||||
* avoid potential deadlock.
|
||||
*/
|
||||
if (xnode != NULL)
|
||||
base_node_dealloc(xnode);
|
||||
if (xprev != NULL)
|
||||
base_node_dealloc(xprev);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_unmap(void *chunk, size_t size)
|
||||
{
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (config_dss && chunk_in_dss(chunk))
|
||||
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
|
||||
else if (chunk_dealloc_mmap(chunk, size))
|
||||
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dealloc(void *chunk, size_t size, bool unmap)
|
||||
{
|
||||
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (config_ivsalloc)
|
||||
rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
|
||||
if (config_stats || config_prof) {
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
assert(stats_chunks.curchunks >= (size / chunksize));
|
||||
stats_chunks.curchunks -= (size / chunksize);
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
}
|
||||
|
||||
if (unmap)
|
||||
chunk_unmap(chunk, size);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_boot(void)
|
||||
{
|
||||
|
||||
/* Set variables according to the value of opt_lg_chunk. */
|
||||
chunksize = (ZU(1) << opt_lg_chunk);
|
||||
assert(chunksize >= PAGE);
|
||||
chunksize_mask = chunksize - 1;
|
||||
chunk_npages = (chunksize >> LG_PAGE);
|
||||
|
||||
if (config_stats || config_prof) {
|
||||
if (malloc_mutex_init(&chunks_mtx))
|
||||
return (true);
|
||||
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
|
||||
}
|
||||
if (config_dss && chunk_dss_boot())
|
||||
return (true);
|
||||
extent_tree_szad_new(&chunks_szad_mmap);
|
||||
extent_tree_ad_new(&chunks_ad_mmap);
|
||||
extent_tree_szad_new(&chunks_szad_dss);
|
||||
extent_tree_ad_new(&chunks_ad_dss);
|
||||
if (config_ivsalloc) {
|
||||
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
opt_lg_chunk, base_alloc, NULL);
|
||||
if (chunks_rtree == NULL)
|
||||
return (true);
|
||||
}
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_prefork(void)
|
||||
{
|
||||
|
||||
malloc_mutex_prefork(&chunks_mtx);
|
||||
if (config_ivsalloc)
|
||||
rtree_prefork(chunks_rtree);
|
||||
chunk_dss_prefork();
|
||||
}
|
||||
|
||||
void
|
||||
chunk_postfork_parent(void)
|
||||
{
|
||||
|
||||
chunk_dss_postfork_parent();
|
||||
if (config_ivsalloc)
|
||||
rtree_postfork_parent(chunks_rtree);
|
||||
malloc_mutex_postfork_parent(&chunks_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_postfork_child(void)
|
||||
{
|
||||
|
||||
chunk_dss_postfork_child();
|
||||
if (config_ivsalloc)
|
||||
rtree_postfork_child(chunks_rtree);
|
||||
malloc_mutex_postfork_child(&chunks_mtx);
|
||||
}
|
||||
198
deps/jemalloc/src/chunk_dss.c
vendored
Normal file
@@ -0,0 +1,198 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_HAVE_SBRK
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (config_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (config_dss == false)
		return (true);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(config_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

bool
chunk_dss_boot(void)
{

	cassert(config_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

void
chunk_dss_prefork(void)
{

	if (config_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (config_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (config_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/
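The gap computation in chunk_alloc_dss() above is the standard trick for rounding an arbitrary break address up to the next chunk boundary. The standalone snippet below is illustrative only (not jemalloc code) and assumes CHUNK_ADDR2OFFSET(a) is simply `a & chunksize_mask`, with a 4 MiB chunk size picked as an example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	uintptr_t chunksize = (uintptr_t)1 << 22;	/* 4 MiB, for example */
	uintptr_t chunksize_mask = chunksize - 1;
	uintptr_t dss_max = 0x1234567;			/* arbitrary break address */

	/* Padding needed to chunk-align the end of the DSS. */
	uintptr_t gap_size = (chunksize - (dss_max & chunksize_mask)) &
	    chunksize_mask;
	uintptr_t aligned = dss_max + gap_size;

	assert((aligned & chunksize_mask) == 0);	/* now chunk-aligned */
	assert(gap_size < chunksize);			/* never a whole extra chunk */
	printf("gap_size = %#lx, aligned = %#lx\n",
	    (unsigned long)gap_size, (unsigned long)aligned);
	return 0;
}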
210
deps/jemalloc/src/chunk_mmap.c
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
#define JEMALLOC_CHUNK_MMAP_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void *pages_map(void *addr, size_t size);
|
||||
static void pages_unmap(void *addr, size_t size);
|
||||
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
|
||||
bool *zero);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
pages_map(void *addr, size_t size)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
assert(size != 0);
|
||||
|
||||
#ifdef _WIN32
|
||||
/*
|
||||
* If VirtualAlloc can't allocate at the given address when one is
|
||||
* given, it fails and returns NULL.
|
||||
*/
|
||||
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
|
||||
PAGE_READWRITE);
|
||||
#else
|
||||
/*
|
||||
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
||||
* of existing mappings, and we only want to create new mappings.
|
||||
*/
|
||||
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
|
||||
-1, 0);
|
||||
assert(ret != NULL);
|
||||
|
||||
if (ret == MAP_FAILED)
|
||||
ret = NULL;
|
||||
else if (addr != NULL && ret != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right place.
|
||||
*/
|
||||
if (munmap(ret, size) == -1) {
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc: Error in munmap(): %s\n",
|
||||
buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
}
|
||||
ret = NULL;
|
||||
}
|
||||
#endif
|
||||
assert(ret == NULL || (addr == NULL && ret != addr)
|
||||
|| (addr != NULL && ret == addr));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
pages_unmap(void *addr, size_t size)
|
||||
{
|
||||
|
||||
#ifdef _WIN32
|
||||
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
||||
#else
|
||||
if (munmap(addr, size) == -1)
|
||||
#endif
|
||||
{
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in "
|
||||
#ifdef _WIN32
|
||||
"VirtualFree"
|
||||
#else
|
||||
"munmap"
|
||||
#endif
|
||||
"(): %s\n", buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
|
||||
{
|
||||
void *ret = (void *)((uintptr_t)addr + leadsize);
|
||||
|
||||
assert(alloc_size >= leadsize + size);
|
||||
#ifdef _WIN32
|
||||
{
|
||||
void *new_addr;
|
||||
|
||||
pages_unmap(addr, alloc_size);
|
||||
new_addr = pages_map(ret, size);
|
||||
if (new_addr == ret)
|
||||
return (ret);
|
||||
if (new_addr)
|
||||
pages_unmap(new_addr, size);
|
||||
return (NULL);
|
||||
}
|
||||
#else
|
||||
{
|
||||
size_t trailsize = alloc_size - leadsize - size;
|
||||
|
||||
if (leadsize != 0)
|
||||
pages_unmap(addr, leadsize);
|
||||
if (trailsize != 0)
|
||||
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
||||
return (ret);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_purge(void *addr, size_t length)
|
||||
{
|
||||
bool unzeroed;
|
||||
|
||||
#ifdef _WIN32
|
||||
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
|
||||
unzeroed = true;
|
||||
#else
|
||||
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
|
||||
# define JEMALLOC_MADV_ZEROS true
|
||||
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||
# define JEMALLOC_MADV_PURGE MADV_FREE
|
||||
# define JEMALLOC_MADV_ZEROS false
|
||||
# else
|
||||
# error "No method defined for purging unused dirty pages."
|
||||
# endif
|
||||
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
|
||||
unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
|
||||
# undef JEMALLOC_MADV_PURGE
|
||||
# undef JEMALLOC_MADV_ZEROS
|
||||
#endif
|
||||
return (unzeroed);
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret, *pages;
|
||||
size_t alloc_size, leadsize;
|
||||
|
||||
alloc_size = size + alignment - PAGE;
|
||||
/* Beware size_t wrap-around. */
|
||||
if (alloc_size < size)
|
||||
return (NULL);
|
||||
do {
|
||||
pages = pages_map(NULL, alloc_size);
|
||||
if (pages == NULL)
|
||||
return (NULL);
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
|
||||
(uintptr_t)pages;
|
||||
ret = pages_trim(pages, alloc_size, leadsize, size);
|
||||
} while (ret == NULL);
|
||||
|
||||
assert(ret != NULL);
|
||||
*zero = true;
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
size_t offset;
|
||||
|
||||
/*
|
||||
* Ideally, there would be a way to specify alignment to mmap() (like
|
||||
* NetBSD has), but in the absence of such a feature, we have to work
|
||||
* hard to efficiently create aligned mappings. The reliable, but
|
||||
* slow method is to create a mapping that is over-sized, then trim the
|
||||
* excess. However, that always results in one or two calls to
|
||||
* pages_unmap().
|
||||
*
|
||||
* Optimistically try mapping precisely the right amount before falling
|
||||
* back to the slow method, with the expectation that the optimistic
|
||||
* approach works most of the time.
|
||||
*/
|
||||
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
ret = pages_map(NULL, size);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
|
||||
if (offset != 0) {
|
||||
pages_unmap(ret, size);
|
||||
return (chunk_alloc_mmap_slow(size, alignment, zero));
|
||||
}
|
||||
|
||||
assert(ret != NULL);
|
||||
*zero = true;
|
||||
return (ret);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_dealloc_mmap(void *chunk, size_t size)
|
||||
{
|
||||
|
||||
if (config_munmap)
|
||||
pages_unmap(chunk, size);
|
||||
|
||||
return (config_munmap == false);
|
||||
}
|
||||
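The comment in chunk_alloc_mmap() above describes the optimistic-then-slow strategy for obtaining aligned mappings from mmap(). The sketch below is a simplified standalone rendering of that idea, not jemalloc's implementation: try a mapping of exactly the requested size, and only if it comes back misaligned fall back to over-allocating and trimming the head and tail. It assumes size and alignment are page-size multiples and alignment is a power of two.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned(size_t size, size_t alignment) {
	/* Optimistic path: hope the kernel returns a suitably aligned region. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		return NULL;
	}
	if (((uintptr_t)p & (alignment - 1)) == 0) {
		return p;
	}
	/* Slow path: over-allocate, then trim the misaligned head and tail. */
	munmap(p, size);
	size_t alloc_size = size + alignment;
	p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		return NULL;
	}
	uintptr_t ret = ((uintptr_t)p + alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	size_t leadsize = ret - (uintptr_t)p;
	size_t trailsize = alloc_size - leadsize - size;
	if (leadsize != 0) {
		munmap(p, leadsize);
	}
	if (trailsize != 0) {
		munmap((void *)(ret + size), trailsize);
	}
	return (void *)ret;
}

int
main(void) {
	void *p = map_aligned(1 << 20, 1 << 21);	/* 1 MiB, 2 MiB aligned */
	assert(p != NULL && ((uintptr_t)p & ((1 << 21) - 1)) == 0);
	munmap(p, 1 << 20);
	return 0;
}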
570
deps/jemalloc/src/ckh.c
vendored
Normal file
@@ -0,0 +1,570 @@
|
||||
/*
|
||||
*******************************************************************************
|
||||
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
|
||||
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
|
||||
* functions are employed. The original cuckoo hashing algorithm was described
|
||||
* in:
|
||||
*
|
||||
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
|
||||
* 51(2):122-144.
|
||||
*
|
||||
* Generalization of cuckoo hashing was discussed in:
|
||||
*
|
||||
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
|
||||
* alternative to traditional hash tables. In Proceedings of the 7th
|
||||
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
|
||||
* January 2006.
|
||||
*
|
||||
* This implementation uses precisely two hash functions because that is the
|
||||
* fewest that can work, and supporting multiple hashes is an implementation
|
||||
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
|
||||
* that shows approximate expected maximum load factors for various
|
||||
* configurations:
|
||||
*
|
||||
* | #cells/bucket |
|
||||
* #hashes | 1 | 2 | 4 | 8 |
|
||||
* --------+-------+-------+-------+-------+
|
||||
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
|
||||
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
|
||||
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
|
||||
* 4 | 0.97 | 0.99 | 0.999 | |
|
||||
*
|
||||
* The number of cells per bucket is chosen such that a bucket fits in one cache
|
||||
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
|
||||
* respectively.
|
||||
*
|
||||
******************************************************************************/
|
||||
#define JEMALLOC_CKH_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/ckh.h"
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/hash.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/prng.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
|
||||
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
/*
|
||||
* Search bucket for key and return the cell number if found; SIZE_T_MAX
|
||||
* otherwise.
|
||||
*/
|
||||
static size_t
|
||||
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
|
||||
ckhc_t *cell;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
|
||||
if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
|
||||
return (bucket << LG_CKH_BUCKET_CELLS) + i;
|
||||
}
|
||||
}
|
||||
|
||||
return SIZE_T_MAX;
|
||||
}
|
||||
|
||||
/*
|
||||
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
|
||||
*/
|
||||
static size_t
|
||||
ckh_isearch(ckh_t *ckh, const void *key) {
|
||||
size_t hashes[2], bucket, cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
ckh->hash(key, hashes);
|
||||
|
||||
/* Search primary bucket. */
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
return cell;
|
||||
}
|
||||
|
||||
/* Search secondary bucket. */
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
return cell;
|
||||
}
|
||||
|
||||
static bool
|
||||
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
const void *data) {
|
||||
ckhc_t *cell;
|
||||
unsigned offset, i;
|
||||
|
||||
/*
|
||||
* Cycle through the cells in the bucket, starting at a random position.
|
||||
* The randomness avoids worst-case search overhead as buckets fill up.
|
||||
*/
|
||||
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
|
||||
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
|
||||
if (cell->key == NULL) {
|
||||
cell->key = key;
|
||||
cell->data = data;
|
||||
ckh->count++;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* No space is available in bucket. Randomly evict an item, then try to find an
|
||||
* alternate location for that item. Iteratively repeat this
|
||||
* eviction/relocation procedure until either success or detection of an
|
||||
* eviction/relocation bucket cycle.
|
||||
*/
|
||||
static bool
|
||||
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
void const **argdata) {
|
||||
const void *key, *data, *tkey, *tdata;
|
||||
ckhc_t *cell;
|
||||
size_t hashes[2], bucket, tbucket;
|
||||
unsigned i;
|
||||
|
||||
bucket = argbucket;
|
||||
key = *argkey;
|
||||
data = *argdata;
|
||||
while (true) {
|
||||
/*
|
||||
* Choose a random item within the bucket to evict. This is
|
||||
* critical to correct function, because without (eventually)
|
||||
* evicting all items within a bucket during iteration, it
|
||||
* would be possible to get stuck in an infinite loop if there
|
||||
* were an item for which both hashes indicated the same
|
||||
* bucket.
|
||||
*/
|
||||
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
|
||||
assert(cell->key != NULL);
|
||||
|
||||
/* Swap cell->{key,data} and {key,data} (evict). */
|
||||
tkey = cell->key; tdata = cell->data;
|
||||
cell->key = key; cell->data = data;
|
||||
key = tkey; data = tdata;
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->nrelocs++;
|
||||
#endif
|
||||
|
||||
/* Find the alternate bucket for the evicted item. */
|
||||
ckh->hash(key, hashes);
|
||||
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (tbucket == bucket) {
|
||||
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
|
||||
- 1);
|
||||
/*
|
||||
* It may be that (tbucket == bucket) still, if the
|
||||
* item's hashes both indicate this bucket. However,
|
||||
* we are guaranteed to eventually escape this bucket
|
||||
* during iteration, assuming pseudo-random item
|
||||
* selection (true randomness would make infinite
|
||||
* looping a remote possibility). The reason we can
|
||||
* never get trapped forever is that there are two
|
||||
* cases:
|
||||
*
|
||||
* 1) This bucket == argbucket, so we will quickly
|
||||
* detect an eviction cycle and terminate.
|
||||
* 2) An item was evicted to this bucket from another,
|
||||
* which means that at least one item in this bucket
|
||||
* has hashes that indicate distinct buckets.
|
||||
*/
|
||||
}
|
||||
/* Check for a cycle. */
|
||||
if (tbucket == argbucket) {
|
||||
*argkey = key;
|
||||
*argdata = data;
|
||||
return true;
|
||||
}
|
||||
|
||||
bucket = tbucket;
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
|
||||
size_t hashes[2], bucket;
|
||||
const void *key = *argkey;
|
||||
const void *data = *argdata;
|
||||
|
||||
ckh->hash(key, hashes);
|
||||
|
||||
/* Try to insert in primary bucket. */
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Try to insert in secondary bucket. */
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to find a place for this item via iterative eviction/relocation.
|
||||
*/
|
||||
return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to rebuild the hash table from scratch by inserting all items from the
|
||||
* old table into the new.
|
||||
*/
|
||||
static bool
|
||||
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
|
||||
size_t count, i, nins;
|
||||
const void *key, *data;
|
||||
|
||||
count = ckh->count;
|
||||
ckh->count = 0;
|
||||
for (i = nins = 0; nins < count; i++) {
|
||||
if (aTab[i].key != NULL) {
|
||||
key = aTab[i].key;
|
||||
data = aTab[i].data;
|
||||
if (ckh_try_insert(ckh, &key, &data)) {
|
||||
ckh->count = count;
|
||||
return true;
|
||||
}
|
||||
nins++;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
|
||||
bool ret;
|
||||
ckhc_t *tab, *ttab;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->ngrows++;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* It is possible (though unlikely, given well behaved hashes) that the
|
||||
* table will have to be doubled more than once in order to create a
|
||||
* usable table.
|
||||
*/
|
||||
lg_prevbuckets = ckh->lg_curbuckets;
|
||||
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
|
||||
while (true) {
|
||||
size_t usize;
|
||||
|
||||
lg_curcells++;
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (unlikely(usize == 0
|
||||
|| usize > SC_LARGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
|
||||
true, NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
/* Swap in new table. */
|
||||
ttab = ckh->tab;
|
||||
ckh->tab = tab;
|
||||
tab = ttab;
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
}
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
|
||||
ckhc_t *tab, *ttab;
|
||||
size_t usize;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
/*
|
||||
* It is possible (though unlikely, given well behaved hashes) that the
|
||||
* table rebuild will fail.
|
||||
*/
|
||||
lg_prevbuckets = ckh->lg_curbuckets;
|
||||
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
|
||||
return;
|
||||
}
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
|
||||
true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
/*
|
||||
* An OOM error isn't worth propagating, since it doesn't
|
||||
* prevent this or future operations from proceeding.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
/* Swap in new table. */
|
||||
ttab = ckh->tab;
|
||||
ckh->tab = tab;
|
||||
tab = ttab;
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
|
||||
#ifdef CKH_COUNT
|
||||
ckh->nshrinks++;
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
#ifdef CKH_COUNT
|
||||
ckh->nshrinkfails++;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh_keycomp_t *keycomp) {
|
||||
bool ret;
|
||||
size_t mincells, usize;
|
||||
unsigned lg_mincells;
|
||||
|
||||
assert(minitems > 0);
|
||||
assert(hash != NULL);
|
||||
assert(keycomp != NULL);
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->ngrows = 0;
|
||||
ckh->nshrinks = 0;
|
||||
ckh->nshrinkfails = 0;
|
||||
ckh->ninserts = 0;
|
||||
ckh->nrelocs = 0;
|
||||
#endif
|
||||
ckh->prng_state = 42; /* Value doesn't really matter. */
|
||||
ckh->count = 0;
|
||||
|
||||
/*
|
||||
* Find the minimum power of 2 that is large enough to fit minitems
|
||||
* entries. We are using (2+,2) cuckoo hashing, which has an expected
|
||||
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
|
||||
* factor that will typically allow mincells items to fit without ever
|
||||
* growing the table.
|
||||
*/
|
||||
assert(LG_CKH_BUCKET_CELLS > 0);
|
||||
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
|
||||
for (lg_mincells = LG_CKH_BUCKET_CELLS;
|
||||
(ZU(1) << lg_mincells) < mincells;
|
||||
lg_mincells++) {
|
||||
/* Do nothing. */
|
||||
}
|
||||
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
|
||||
ckh->hash = hash;
|
||||
ckh->keycomp = keycomp;
|
||||
|
||||
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
|
||||
if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
|
||||
NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (ckh->tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
|
||||
assert(ckh != NULL);
|
||||
|
||||
#ifdef CKH_VERBOSE
|
||||
malloc_printf(
|
||||
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
|
||||
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
|
||||
" nrelocs: %"FMTu64"\n", __func__, ckh,
|
||||
(unsigned long long)ckh->ngrows,
|
||||
(unsigned long long)ckh->nshrinks,
|
||||
(unsigned long long)ckh->nshrinkfails,
|
||||
(unsigned long long)ckh->ninserts,
|
||||
(unsigned long long)ckh->nrelocs);
|
||||
#endif
|
||||
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
|
||||
if (config_debug) {
|
||||
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
|
||||
}
|
||||
}
|
||||
|
||||
size_t
|
||||
ckh_count(ckh_t *ckh) {
|
||||
assert(ckh != NULL);
|
||||
|
||||
return ckh->count;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
|
||||
size_t i, ncells;
|
||||
|
||||
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
|
||||
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
|
||||
if (ckh->tab[i].key != NULL) {
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[i].key;
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[i].data;
|
||||
}
|
||||
*tabind = i + 1;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
|
||||
bool ret;
|
||||
|
||||
assert(ckh != NULL);
|
||||
assert(ckh_search(ckh, key, NULL, NULL));
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->ninserts++;
|
||||
#endif
|
||||
|
||||
while (ckh_try_insert(ckh, &key, &data)) {
|
||||
if (ckh_grow(tsd, ckh)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
|
||||
ret = false;
|
||||
label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
||||
void **data) {
|
||||
size_t cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
cell = ckh_isearch(ckh, searchkey);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[cell].key;
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[cell].data;
|
||||
}
|
||||
ckh->tab[cell].key = NULL;
|
||||
ckh->tab[cell].data = NULL; /* Not necessary. */
|
||||
|
||||
ckh->count--;
|
||||
/* Try to halve the table if it is less than 1/4 full. */
|
||||
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
|
||||
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
|
||||
> ckh->lg_minbuckets) {
|
||||
/* Ignore error due to OOM. */
|
||||
ckh_shrink(tsd, ckh);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
|
||||
size_t cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
cell = ckh_isearch(ckh, searchkey);
|
||||
if (cell != SIZE_T_MAX) {
|
||||
if (key != NULL) {
|
||||
*key = (void *)ckh->tab[cell].key;
|
||||
}
|
||||
if (data != NULL) {
|
||||
*data = (void *)ckh->tab[cell].data;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
ckh_string_hash(const void *key, size_t r_hash[2]) {
|
||||
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_string_keycomp(const void *k1, const void *k2) {
|
||||
assert(k1 != NULL);
|
||||
assert(k2 != NULL);
|
||||
|
||||
return !strcmp((char *)k1, (char *)k2);
|
||||
}
|
||||
|
||||
void
|
||||
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
|
||||
union {
|
||||
const void *v;
|
||||
size_t i;
|
||||
} u;
|
||||
|
||||
assert(sizeof(u.v) == sizeof(u.i));
|
||||
u.v = key;
|
||||
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
|
||||
}
|
||||
|
||||
bool
|
||||
ckh_pointer_keycomp(const void *k1, const void *k2) {
|
||||
return (k1 == k2);
|
||||
}
|
||||
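Since ckh is an internal-only API, a usage sketch may help tie the pieces above together. The function below is illustrative, not part of the source; it assumes it is compiled inside jemalloc with the internal headers and a valid tsd_t, and it relies only on the signatures shown above. Note the jemalloc convention that ckh_new(), ckh_insert() and ckh_search() return false on success.

/* Illustrative only; assumes jemalloc's internal headers are in scope. */
static void
ckh_usage_sketch(tsd_t *tsd) {
	ckh_t ckh;

	/* String-keyed table sized for roughly 16 items. */
	if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
		return;		/* OOM. */
	}
	if (!ckh_insert(tsd, &ckh, "key", "value")) {
		void *data;
		/* A false return means the lookup succeeded. */
		if (!ckh_search(&ckh, "key", NULL, &data)) {
			assert(strcmp((const char *)data, "value") == 0);
		}
	}
	ckh_delete(tsd, &ckh);
}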
3435
deps/jemalloc/src/ctl.c
vendored
Normal file
File diff suppressed because it is too large
Load Diff
55
deps/jemalloc/src/div.c
vendored
Normal file
@@ -0,0 +1,55 @@
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/div.h"

#include "jemalloc/internal/assert.h"

/*
 * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
 *
 * For any k, we have (here, all division is exact; not C-style rounding):
 * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
 * r = (-2^k) mod d.
 *
 * Expanding this out:
 * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
 *     = floor(n / d + (r / d) * (n / 2^k)).
 *
 * The fractional part of n / d is 0 (because of the assumption that d divides n
 * exactly), so we have:
 * ... = n / d + floor((r / d) * (n / 2^k))
 *
 * So that our initial expression is equal to the quantity we seek, so long as
 * (r / d) * (n / 2^k) < 1.
 *
 * r is a remainder mod d, so r < d and r / d < 1 always. We can make
 * n / 2^k < 1 by setting k = 32. This gets us a value of magic that works.
 */

void
div_init(div_info_t *div_info, size_t d) {
	/* Nonsensical. */
	assert(d != 0);
	/*
	 * This would make the value of magic too high to fit into a uint32_t
	 * (we would want magic = 2^32 exactly). This would mess with code gen
	 * on 32-bit machines.
	 */
	assert(d != 1);

	uint64_t two_to_k = ((uint64_t)1 << 32);
	uint32_t magic = (uint32_t)(two_to_k / d);

	/*
	 * We want magic = ceil(2^k / d), but C gives us floor. We have to
	 * increment it unless the result was exact (i.e. unless d is a power of
	 * two).
	 */
	if (two_to_k % d != 0) {
		magic++;
	}
	div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
	div_info->d = d;
#endif
}
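The derivation in the comment above is easy to check numerically. The standalone snippet below is not part of jemalloc; it mirrors div_init()'s magic computation for a sample divisor and verifies that (magic * n) >> 32 recovers n / d for exact multiples of d, which is the only case div_init() promises to handle.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	size_t d = 7;				/* sample divisor */
	uint64_t two_to_k = (uint64_t)1 << 32;
	uint32_t magic = (uint32_t)(two_to_k / d);
	if (two_to_k % d != 0) {
		magic++;			/* magic = ceil(2^32 / d) */
	}
	/*
	 * For any n < 2^32 that is an exact multiple of d, multiplying by magic
	 * and shifting right by 32 yields n / d without a hardware division.
	 */
	for (uint32_t q = 0; q < 100000; q++) {
		uint32_t n = (uint32_t)(q * d);
		uint32_t got = (uint32_t)(((uint64_t)magic * n) >> 32);
		assert(got == q);
	}
	printf("magic for d=%zu is %u\n", d, magic);
	return 0;
}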
2403
deps/jemalloc/src/extent.c
vendored
Normal file
File diff suppressed because it is too large
Load Diff
271
deps/jemalloc/src/extent_dss.c
vendored
Normal file
@@ -0,0 +1,271 @@
|
||||
#define JEMALLOC_EXTENT_DSS_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/spin.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
const char *opt_dss = DSS_DEFAULT;
|
||||
|
||||
const char *dss_prec_names[] = {
|
||||
"disabled",
|
||||
"primary",
|
||||
"secondary",
|
||||
"N/A"
|
||||
};
|
||||
|
||||
/*
|
||||
* Current dss precedence default, used when creating new arenas. NB: This is
|
||||
* stored as unsigned rather than dss_prec_t because in principle there's no
|
||||
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
|
||||
* atomic operations to synchronize the setting.
|
||||
*/
|
||||
static atomic_u_t dss_prec_default = ATOMIC_INIT(
|
||||
(unsigned)DSS_PREC_DEFAULT);
|
||||
|
||||
/* Base address of the DSS. */
|
||||
static void *dss_base;
|
||||
/* Atomic boolean indicating whether a thread is currently extending DSS. */
|
||||
static atomic_b_t dss_extending;
|
||||
/* Atomic boolean indicating whether the DSS is exhausted. */
|
||||
static atomic_b_t dss_exhausted;
|
||||
/* Atomic current upper limit on DSS addresses. */
|
||||
static atomic_p_t dss_max;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
extent_dss_sbrk(intptr_t increment) {
|
||||
#ifdef JEMALLOC_DSS
|
||||
return sbrk(increment);
|
||||
#else
|
||||
not_implemented();
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
dss_prec_t
|
||||
extent_dss_prec_get(void) {
|
||||
dss_prec_t ret;
|
||||
|
||||
if (!have_dss) {
|
||||
return dss_prec_disabled;
|
||||
}
|
||||
ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
extent_dss_prec_set(dss_prec_t dss_prec) {
|
||||
if (!have_dss) {
|
||||
return (dss_prec != dss_prec_disabled);
|
||||
}
|
||||
atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
extent_dss_extending_start(void) {
|
||||
spin_t spinner = SPIN_INITIALIZER;
|
||||
while (true) {
|
||||
bool expected = false;
|
||||
if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
|
||||
true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
|
||||
break;
|
||||
}
|
||||
spin_adaptive(&spinner);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
extent_dss_extending_finish(void) {
|
||||
assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
|
||||
|
||||
atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
static void *
|
||||
extent_dss_max_update(void *new_addr) {
|
||||
/*
|
||||
* Get the current end of the DSS as max_cur and assure that dss_max is
|
||||
* up to date.
|
||||
*/
|
||||
void *max_cur = extent_dss_sbrk(0);
|
||||
if (max_cur == (void *)-1) {
|
||||
return NULL;
|
||||
}
|
||||
atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
|
||||
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
|
||||
if (new_addr != NULL && max_cur != new_addr) {
|
||||
return NULL;
|
||||
}
|
||||
return max_cur;
|
||||
}
|
||||
|
||||
void *
|
||||
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit) {
|
||||
extent_t *gap;
|
||||
|
||||
cassert(have_dss);
|
||||
assert(size > 0);
|
||||
assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
|
||||
|
||||
/*
|
||||
* sbrk() uses a signed increment argument, so take care not to
|
||||
* interpret a large allocation request as a negative increment.
|
||||
*/
|
||||
if ((intptr_t)size < 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
gap = extent_alloc(tsdn, arena);
|
||||
if (gap == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
extent_dss_extending_start();
|
||||
if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
|
||||
/*
|
||||
* The loop is necessary to recover from races with other
|
||||
* threads that are using the DSS for something other than
|
||||
* malloc.
|
||||
*/
|
||||
while (true) {
|
||||
void *max_cur = extent_dss_max_update(new_addr);
|
||||
if (max_cur == NULL) {
|
||||
goto label_oom;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute how much page-aligned gap space (if any) is
|
||||
* necessary to satisfy alignment. This space can be
|
||||
* recycled for later use.
|
||||
*/
|
||||
void *gap_addr_page = (void *)(PAGE_CEILING(
|
||||
(uintptr_t)max_cur));
|
||||
void *ret = (void *)ALIGNMENT_CEILING(
|
||||
(uintptr_t)gap_addr_page, alignment);
|
||||
size_t gap_size_page = (uintptr_t)ret -
|
||||
(uintptr_t)gap_addr_page;
|
||||
if (gap_size_page != 0) {
|
||||
extent_init(gap, arena, gap_addr_page,
|
||||
gap_size_page, false, SC_NSIZES,
|
||||
arena_extent_sn_next(arena),
|
||||
extent_state_active, false, true, true,
|
||||
EXTENT_NOT_HEAD);
|
||||
}
|
||||
/*
|
||||
* Compute the address just past the end of the desired
|
||||
* allocation space.
|
||||
*/
|
||||
void *dss_next = (void *)((uintptr_t)ret + size);
|
||||
if ((uintptr_t)ret < (uintptr_t)max_cur ||
|
||||
(uintptr_t)dss_next < (uintptr_t)max_cur) {
|
||||
goto label_oom; /* Wrap-around. */
|
||||
}
|
||||
/* Compute the increment, including subpage bytes. */
|
||||
void *gap_addr_subpage = max_cur;
|
||||
size_t gap_size_subpage = (uintptr_t)ret -
|
||||
(uintptr_t)gap_addr_subpage;
|
||||
intptr_t incr = gap_size_subpage + size;
|
||||
|
||||
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
|
||||
size);
|
||||
|
||||
/* Try to allocate. */
|
||||
void *dss_prev = extent_dss_sbrk(incr);
|
||||
if (dss_prev == max_cur) {
|
||||
/* Success. */
|
||||
atomic_store_p(&dss_max, dss_next,
|
||||
ATOMIC_RELEASE);
|
||||
extent_dss_extending_finish();
|
||||
|
||||
if (gap_size_page != 0) {
|
||||
extent_dalloc_gap(tsdn, arena, gap);
|
||||
} else {
|
||||
extent_dalloc(tsdn, arena, gap);
|
||||
}
|
||||
if (!*commit) {
|
||||
*commit = pages_decommit(ret, size);
|
||||
}
|
||||
if (*zero && *commit) {
|
||||
extent_hooks_t *extent_hooks =
|
||||
EXTENT_HOOKS_INITIALIZER;
|
||||
extent_t extent;
|
||||
|
||||
extent_init(&extent, arena, ret, size,
|
||||
size, false, SC_NSIZES,
|
||||
extent_state_active, false, true,
|
||||
true, EXTENT_NOT_HEAD);
|
||||
if (extent_purge_forced_wrapper(tsdn,
|
||||
arena, &extent_hooks, &extent, 0,
|
||||
size)) {
|
||||
memset(ret, 0, size);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* Failure, whether due to OOM or a race with a raw
|
||||
* sbrk() call from outside the allocator.
|
||||
*/
|
||||
if (dss_prev == (void *)-1) {
|
||||
/* OOM. */
|
||||
atomic_store_b(&dss_exhausted, true,
|
||||
ATOMIC_RELEASE);
|
||||
goto label_oom;
|
||||
}
|
||||
}
|
||||
}
|
||||
label_oom:
|
||||
extent_dss_extending_finish();
|
||||
extent_dalloc(tsdn, arena, gap);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool
|
||||
extent_in_dss_helper(void *addr, void *max) {
|
||||
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
|
||||
(uintptr_t)max);
|
||||
}
|
||||
|
||||
bool
|
||||
extent_in_dss(void *addr) {
|
||||
cassert(have_dss);
|
||||
|
||||
return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
|
||||
ATOMIC_ACQUIRE));
|
||||
}
|
||||
|
||||
bool
|
||||
extent_dss_mergeable(void *addr_a, void *addr_b) {
|
||||
void *max;
|
||||
|
||||
cassert(have_dss);
|
||||
|
||||
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
|
||||
(uintptr_t)dss_base) {
|
||||
return true;
|
||||
}
|
||||
|
||||
max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
|
||||
return (extent_in_dss_helper(addr_a, max) ==
|
||||
extent_in_dss_helper(addr_b, max));
|
||||
}
|
||||
|
||||
void
|
||||
extent_dss_boot(void) {
|
||||
cassert(have_dss);
|
||||
|
||||
dss_base = extent_dss_sbrk(0);
|
||||
atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
|
||||
atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
|
||||
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
42
deps/jemalloc/src/extent_mmap.c
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
#define JEMALLOC_EXTENT_MMAP_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/extent_mmap.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
bool opt_retain =
|
||||
#ifdef JEMALLOC_RETAIN
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
void *
|
||||
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
bool *commit) {
|
||||
assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
|
||||
void *ret = pages_map(new_addr, size, alignment, commit);
|
||||
if (ret == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
assert(ret != NULL);
|
||||
if (*commit) {
|
||||
*zero = true;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
extent_dalloc_mmap(void *addr, size_t size) {
|
||||
if (!opt_retain) {
|
||||
pages_unmap(addr, size);
|
||||
}
|
||||
return opt_retain;
|
||||
}
|
||||
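extent_dalloc_mmap above only unmaps when opt_retain is false; when retention is on, the virtual mapping is kept and the return value tells the caller the extent was not released. A rough standalone illustration of that decision, using raw mmap/munmap and a made-up demo_retain flag rather than jemalloc's pages API:

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static const bool demo_retain = true;  /* stand-in for opt_retain */

/* Returns true if the mapping was kept (caller must recycle it later). */
static bool
demo_dalloc_mmap(void *addr, size_t size) {
    if (!demo_retain) {
        munmap(addr, size);
    }
    return demo_retain;
}

int
main(void) {
    size_t sz = (size_t)1 << 20;
    void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return 1;
    }
    printf("retained: %d\n", demo_dalloc_mmap(p, sz));
    return 0;
}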
3
deps/jemalloc/src/hash.c
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
#define JEMALLOC_HASH_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
195
deps/jemalloc/src/hook.c
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/hook.h"
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/seq.h"
|
||||
|
||||
typedef struct hooks_internal_s hooks_internal_t;
|
||||
struct hooks_internal_s {
|
||||
hooks_t hooks;
|
||||
bool in_use;
|
||||
};
|
||||
|
||||
seq_define(hooks_internal_t, hooks)
|
||||
|
||||
static atomic_u_t nhooks = ATOMIC_INIT(0);
|
||||
static seq_hooks_t hooks[HOOK_MAX];
|
||||
static malloc_mutex_t hooks_mu;
|
||||
|
||||
bool
|
||||
hook_boot() {
|
||||
return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
|
||||
malloc_mutex_rank_exclusive);
|
||||
}
|
||||
|
||||
static void *
|
||||
hook_install_locked(hooks_t *to_install) {
|
||||
hooks_internal_t hooks_internal;
|
||||
for (int i = 0; i < HOOK_MAX; i++) {
|
||||
bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
|
||||
/* We hold mu; no concurrent access. */
|
||||
assert(success);
|
||||
if (!hooks_internal.in_use) {
|
||||
hooks_internal.hooks = *to_install;
|
||||
hooks_internal.in_use = true;
|
||||
seq_store_hooks(&hooks[i], &hooks_internal);
|
||||
atomic_store_u(&nhooks,
|
||||
atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
|
||||
ATOMIC_RELAXED);
|
||||
return &hooks[i];
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *
|
||||
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
|
||||
malloc_mutex_lock(tsdn, &hooks_mu);
|
||||
void *ret = hook_install_locked(to_install);
|
||||
if (ret != NULL) {
|
||||
tsd_global_slow_inc(tsdn);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &hooks_mu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
hook_remove_locked(seq_hooks_t *to_remove) {
|
||||
hooks_internal_t hooks_internal;
|
||||
bool success = seq_try_load_hooks(&hooks_internal, to_remove);
|
||||
/* We hold mu; no concurrent access. */
|
||||
assert(success);
|
||||
/* Should only remove hooks that were added. */
|
||||
assert(hooks_internal.in_use);
|
||||
hooks_internal.in_use = false;
|
||||
seq_store_hooks(to_remove, &hooks_internal);
|
||||
atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
|
||||
ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
void
|
||||
hook_remove(tsdn_t *tsdn, void *opaque) {
|
||||
if (config_debug) {
|
||||
char *hooks_begin = (char *)&hooks[0];
|
||||
char *hooks_end = (char *)&hooks[HOOK_MAX];
|
||||
char *hook = (char *)opaque;
|
||||
assert(hooks_begin <= hook && hook < hooks_end
|
||||
&& (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
|
||||
}
|
||||
malloc_mutex_lock(tsdn, &hooks_mu);
|
||||
hook_remove_locked((seq_hooks_t *)opaque);
|
||||
tsd_global_slow_dec(tsdn);
|
||||
malloc_mutex_unlock(tsdn, &hooks_mu);
|
||||
}
|
||||
|
||||
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
|
||||
for (int for_each_hook_counter = 0; \
|
||||
for_each_hook_counter < HOOK_MAX; \
|
||||
for_each_hook_counter++) { \
|
||||
bool for_each_hook_success = seq_try_load_hooks( \
|
||||
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
|
||||
if (!for_each_hook_success) { \
|
||||
continue; \
|
||||
} \
|
||||
if (!(hooks_internal_ptr)->in_use) { \
|
||||
continue; \
|
||||
}
|
||||
#define FOR_EACH_HOOK_END \
|
||||
}
|
||||
|
||||
static bool *
|
||||
hook_reentrantp() {
|
||||
/*
|
||||
* We prevent user reentrancy within hooks. This is basically just a
|
||||
* thread-local bool that triggers an early-exit.
|
||||
*
|
||||
* We don't fold in_hook into reentrancy. There are two reasons for
|
||||
* this:
|
||||
* - Right now, we turn on reentrancy during things like extent hook
|
||||
* execution. Allocating during extent hooks is not officially
|
||||
* supported, but we don't want to break it for the time being. These
|
||||
* sorts of allocations should probably still be hooked, though.
|
||||
* - If a hook allocates, we may want it to be relatively fast (after
|
||||
* all, it executes on every allocator operation). Turning on
|
||||
* reentrancy is a fairly heavyweight mode (disabling tcache,
|
||||
* redirecting to arena 0, etc.). It's possible we may one day want
|
||||
* to turn on reentrant mode here, if it proves too difficult to keep
|
||||
* this working. But that's fairly easy for us to see; OTOH, people
|
||||
* not using hooks because they're too slow is easy for us to miss.
|
||||
*
|
||||
* The tricky part is that this code might get invoked even if we don't
* have access to tsd.
|
||||
* This function mimics getting a pointer to thread-local data, except
|
||||
* that it might secretly return a pointer to some global data if we
|
||||
* know that the caller will take the early-exit path.
|
||||
* If we return a bool that indicates that we are reentrant, then the
|
||||
* caller will go down the early exit path, leaving the global
|
||||
* untouched.
|
||||
*/
|
||||
static bool in_hook_global = true;
|
||||
tsdn_t *tsdn = tsdn_fetch();
|
||||
tcache_t *tcache = tsdn_tcachep_get(tsdn);
|
||||
if (tcache != NULL) {
|
||||
return &tcache->in_hook;
|
||||
}
|
||||
return &in_hook_global;
|
||||
}
|
||||
|
||||
#define HOOK_PROLOGUE \
|
||||
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
|
||||
return; \
|
||||
} \
|
||||
bool *in_hook = hook_reentrantp(); \
|
||||
if (*in_hook) { \
|
||||
return; \
|
||||
} \
|
||||
*in_hook = true;
|
||||
|
||||
#define HOOK_EPILOGUE \
|
||||
*in_hook = false;
|
||||
|
||||
void
|
||||
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
|
||||
uintptr_t args_raw[3]) {
|
||||
HOOK_PROLOGUE
|
||||
|
||||
hooks_internal_t hook;
|
||||
FOR_EACH_HOOK_BEGIN(&hook)
|
||||
hook_alloc h = hook.hooks.alloc_hook;
|
||||
if (h != NULL) {
|
||||
h(hook.hooks.extra, type, result, result_raw, args_raw);
|
||||
}
|
||||
FOR_EACH_HOOK_END
|
||||
|
||||
HOOK_EPILOGUE
|
||||
}
|
||||
|
||||
void
|
||||
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
|
||||
HOOK_PROLOGUE
|
||||
hooks_internal_t hook;
|
||||
FOR_EACH_HOOK_BEGIN(&hook)
|
||||
hook_dalloc h = hook.hooks.dalloc_hook;
|
||||
if (h != NULL) {
|
||||
h(hook.hooks.extra, type, address, args_raw);
|
||||
}
|
||||
FOR_EACH_HOOK_END
|
||||
HOOK_EPILOGUE
|
||||
}
|
||||
|
||||
void
|
||||
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
|
||||
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
|
||||
HOOK_PROLOGUE
|
||||
hooks_internal_t hook;
|
||||
FOR_EACH_HOOK_BEGIN(&hook)
|
||||
hook_expand h = hook.hooks.expand_hook;
|
||||
if (h != NULL) {
|
||||
h(hook.hooks.extra, type, address, old_usize, new_usize,
|
||||
result_raw, args_raw);
|
||||
}
|
||||
FOR_EACH_HOOK_END
|
||||
HOOK_EPILOGUE
|
||||
}
|
||||
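The hook machinery above boils down to a small fixed-size slot table guarded by a mutex, plus a per-thread flag that short-circuits invocation while a hook is already running. The sketch below condenses that idea into a single-threaded toy with made-up demo_ names; it omits the seqlock, the mutex, and tsd entirely.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_HOOK_MAX 4

typedef void (*demo_hook_fn)(void *extra, void *result);

typedef struct {
    demo_hook_fn fn;
    void *extra;
    bool in_use;
} demo_hook_t;

static demo_hook_t demo_hooks[DEMO_HOOK_MAX];
static bool demo_in_hook;  /* stand-in for the per-thread in_hook flag */

/* Install into the first free slot; returns an opaque handle or NULL. */
static void *
demo_hook_install(demo_hook_fn fn, void *extra) {
    for (int i = 0; i < DEMO_HOOK_MAX; i++) {
        if (!demo_hooks[i].in_use) {
            demo_hooks[i] = (demo_hook_t){fn, extra, true};
            return &demo_hooks[i];
        }
    }
    return NULL;
}

static void
demo_hook_remove(void *opaque) {
    ((demo_hook_t *)opaque)->in_use = false;
}

/* Invoke every installed hook, skipping re-entrant calls. */
static void
demo_hook_invoke(void *result) {
    if (demo_in_hook) {
        return;
    }
    demo_in_hook = true;
    for (int i = 0; i < DEMO_HOOK_MAX; i++) {
        if (demo_hooks[i].in_use && demo_hooks[i].fn != NULL) {
            demo_hooks[i].fn(demo_hooks[i].extra, result);
        }
    }
    demo_in_hook = false;
}

static void
demo_print_hook(void *extra, void *result) {
    printf("%s saw %p\n", (const char *)extra, result);
}

int
main(void) {
    static char tag[] = "demo";
    void *h = demo_hook_install(demo_print_hook, tag);
    demo_hook_invoke((void *)0x1234);
    demo_hook_remove(h);
    return 0;
}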
12
deps/jemalloc/src/hooks.c
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
/*
|
||||
* The hooks are a little bit screwy -- they're not genuinely exported in the
|
||||
* sense that we want them available to end-users, but we do want them visible
|
||||
* from outside the generated library, so that we can use them in test code.
|
||||
*/
|
||||
JEMALLOC_EXPORT
|
||||
void (*hooks_arena_new_hook)() = NULL;
|
||||
|
||||
JEMALLOC_EXPORT
|
||||
void (*hooks_libc_hook)() = NULL;
|
||||
347
deps/jemalloc/src/huge.c
vendored
Normal file
@@ -0,0 +1,347 @@
|
||||
#define JEMALLOC_HUGE_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
uint64_t huge_nmalloc;
|
||||
uint64_t huge_ndalloc;
|
||||
size_t huge_allocated;
|
||||
|
||||
malloc_mutex_t huge_mtx;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
/* Tree of chunks that are stand-alone huge allocations. */
|
||||
static extent_tree_t huge;
|
||||
|
||||
void *
|
||||
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
|
||||
{
|
||||
|
||||
return (huge_palloc(size, chunksize, zero, dss_prec));
|
||||
}
|
||||
|
||||
void *
|
||||
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
size_t csize;
|
||||
extent_node_t *node;
|
||||
bool is_zeroed;
|
||||
|
||||
/* Allocate one or more contiguous chunks for this request. */
|
||||
|
||||
csize = CHUNK_CEILING(size);
|
||||
if (csize == 0) {
|
||||
/* size is large enough to cause size_t wrap-around. */
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
/* Allocate an extent node with which to track the chunk. */
|
||||
node = base_node_alloc();
|
||||
if (node == NULL)
|
||||
return (NULL);
|
||||
|
||||
/*
|
||||
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
|
||||
* it is possible to make correct junk/zero fill decisions below.
|
||||
*/
|
||||
is_zeroed = zero;
|
||||
ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
|
||||
if (ret == NULL) {
|
||||
base_node_dealloc(node);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
/* Insert node into huge. */
|
||||
node->addr = ret;
|
||||
node->size = csize;
|
||||
|
||||
malloc_mutex_lock(&huge_mtx);
|
||||
extent_tree_ad_insert(&huge, node);
|
||||
if (config_stats) {
|
||||
stats_cactive_add(csize);
|
||||
huge_nmalloc++;
|
||||
huge_allocated += csize;
|
||||
}
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
|
||||
if (config_fill && zero == false) {
|
||||
if (opt_junk)
|
||||
memset(ret, 0xa5, csize);
|
||||
else if (opt_zero && is_zeroed == false)
|
||||
memset(ret, 0, csize);
|
||||
}
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
bool
|
||||
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
|
||||
{
|
||||
|
||||
/*
|
||||
* Avoid moving the allocation if the size class can be left the same.
|
||||
*/
|
||||
if (oldsize > arena_maxclass
|
||||
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
|
||||
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
|
||||
assert(CHUNK_CEILING(oldsize) == oldsize);
|
||||
return (false);
|
||||
}
|
||||
|
||||
/* Reallocation would require a move. */
|
||||
return (true);
|
||||
}
|
||||
|
||||
void *
|
||||
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
size_t copysize;
|
||||
|
||||
/* Try to avoid moving the allocation. */
|
||||
if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
|
||||
return (ptr);
|
||||
|
||||
/*
|
||||
* size and oldsize are different enough that we need to use a
|
||||
* different size class. In that case, fall back to allocating new
|
||||
* space and copying.
|
||||
*/
|
||||
if (alignment > chunksize)
|
||||
ret = huge_palloc(size + extra, alignment, zero, dss_prec);
|
||||
else
|
||||
ret = huge_malloc(size + extra, zero, dss_prec);
|
||||
|
||||
if (ret == NULL) {
|
||||
if (extra == 0)
|
||||
return (NULL);
|
||||
/* Try again, this time without extra. */
|
||||
if (alignment > chunksize)
|
||||
ret = huge_palloc(size, alignment, zero, dss_prec);
|
||||
else
|
||||
ret = huge_malloc(size, zero, dss_prec);
|
||||
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy at most size bytes (not size+extra), since the caller has no
|
||||
* expectation that the extra bytes will be reliably preserved.
|
||||
*/
|
||||
copysize = (size < oldsize) ? size : oldsize;
|
||||
|
||||
#ifdef JEMALLOC_MREMAP
|
||||
/*
|
||||
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
|
||||
* source nor the destination are in dss.
|
||||
*/
|
||||
if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
|
||||
== false && chunk_in_dss(ret) == false))) {
|
||||
size_t newsize = huge_salloc(ret);
|
||||
|
||||
/*
|
||||
* Remove ptr from the tree of huge allocations before
|
||||
* performing the remap operation, in order to avoid the
|
||||
* possibility of another thread acquiring that mapping before
|
||||
* this one removes it from the tree.
|
||||
*/
|
||||
huge_dalloc(ptr, false);
|
||||
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
|
||||
ret) == MAP_FAILED) {
|
||||
/*
|
||||
* Assuming no chunk management bugs in the allocator,
|
||||
* the only documented way an error can occur here is
|
||||
* if the application changed the map type for a
|
||||
* portion of the old allocation. This is firmly in
|
||||
* undefined behavior territory, so write a diagnostic
|
||||
* message, and optionally abort.
|
||||
*/
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
|
||||
buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
memcpy(ret, ptr, copysize);
|
||||
chunk_dealloc_mmap(ptr, oldsize);
|
||||
} else if (config_fill && zero == false && opt_junk && oldsize
|
||||
< newsize) {
|
||||
/*
|
||||
* mremap(2) clobbers the original mapping, so
|
||||
* junk/zero filling is not preserved. There is no
|
||||
* need to zero fill here, since any trailing
|
||||
* uninitialized memory is demand-zeroed by the
|
||||
* kernel, but junk filling must be redone.
|
||||
*/
|
||||
memset(ret + oldsize, 0xa5, newsize - oldsize);
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
memcpy(ret, ptr, copysize);
|
||||
iqalloct(ptr, try_tcache_dalloc);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_JET
|
||||
#undef huge_dalloc_junk
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
|
||||
#endif
|
||||
static void
|
||||
huge_dalloc_junk(void *ptr, size_t usize)
|
||||
{
|
||||
|
||||
if (config_fill && config_dss && opt_junk) {
|
||||
/*
|
||||
* Only bother junk filling if the chunk isn't about to be
|
||||
* unmapped.
|
||||
*/
|
||||
if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
|
||||
memset(ptr, 0x5a, usize);
|
||||
}
|
||||
}
|
||||
#ifdef JEMALLOC_JET
|
||||
#undef huge_dalloc_junk
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
|
||||
#endif
|
||||
|
||||
void
|
||||
huge_dalloc(void *ptr, bool unmap)
|
||||
{
|
||||
extent_node_t *node, key;
|
||||
|
||||
malloc_mutex_lock(&huge_mtx);
|
||||
|
||||
/* Extract from tree of huge allocations. */
|
||||
key.addr = ptr;
|
||||
node = extent_tree_ad_search(&huge, &key);
|
||||
assert(node != NULL);
|
||||
assert(node->addr == ptr);
|
||||
extent_tree_ad_remove(&huge, node);
|
||||
|
||||
if (config_stats) {
|
||||
stats_cactive_sub(node->size);
|
||||
huge_ndalloc++;
|
||||
huge_allocated -= node->size;
|
||||
}
|
||||
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
|
||||
if (unmap)
|
||||
huge_dalloc_junk(node->addr, node->size);
|
||||
|
||||
chunk_dealloc(node->addr, node->size, unmap);
|
||||
|
||||
base_node_dealloc(node);
|
||||
}
|
||||
|
||||
size_t
|
||||
huge_salloc(const void *ptr)
|
||||
{
|
||||
size_t ret;
|
||||
extent_node_t *node, key;
|
||||
|
||||
malloc_mutex_lock(&huge_mtx);
|
||||
|
||||
/* Extract from tree of huge allocations. */
|
||||
key.addr = __DECONST(void *, ptr);
|
||||
node = extent_tree_ad_search(&huge, &key);
|
||||
assert(node != NULL);
|
||||
|
||||
ret = node->size;
|
||||
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
dss_prec_t
|
||||
huge_dss_prec_get(arena_t *arena)
|
||||
{
|
||||
|
||||
return (arena_dss_prec_get(choose_arena(arena)));
|
||||
}
|
||||
|
||||
prof_ctx_t *
|
||||
huge_prof_ctx_get(const void *ptr)
|
||||
{
|
||||
prof_ctx_t *ret;
|
||||
extent_node_t *node, key;
|
||||
|
||||
malloc_mutex_lock(&huge_mtx);
|
||||
|
||||
/* Extract from tree of huge allocations. */
|
||||
key.addr = __DECONST(void *, ptr);
|
||||
node = extent_tree_ad_search(&huge, &key);
|
||||
assert(node != NULL);
|
||||
|
||||
ret = node->prof_ctx;
|
||||
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void
|
||||
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
|
||||
{
|
||||
extent_node_t *node, key;
|
||||
|
||||
malloc_mutex_lock(&huge_mtx);
|
||||
|
||||
/* Extract from tree of huge allocations. */
|
||||
key.addr = __DECONST(void *, ptr);
|
||||
node = extent_tree_ad_search(&huge, &key);
|
||||
assert(node != NULL);
|
||||
|
||||
node->prof_ctx = ctx;
|
||||
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
}
|
||||
|
||||
bool
|
||||
huge_boot(void)
|
||||
{
|
||||
|
||||
/* Initialize chunks data. */
|
||||
if (malloc_mutex_init(&huge_mtx))
|
||||
return (true);
|
||||
extent_tree_ad_new(&huge);
|
||||
|
||||
if (config_stats) {
|
||||
huge_nmalloc = 0;
|
||||
huge_ndalloc = 0;
|
||||
huge_allocated = 0;
|
||||
}
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
huge_prefork(void)
|
||||
{
|
||||
|
||||
malloc_mutex_prefork(&huge_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
huge_postfork_parent(void)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_parent(&huge_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
huge_postfork_child(void)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_child(&huge_mtx);
|
||||
}
|
||||
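huge_ralloc_no_move above keeps the allocation in place whenever the chunk-rounded old size already covers the requested range. The rounding test is plain ceiling-to-a-power-of-two arithmetic; here is a standalone sketch with an assumed 4 MiB chunk size (DEMO_CHUNK and demo_ names are made up for illustration).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_CHUNK ((size_t)4 << 20)  /* stand-in for chunksize */

/* Round up to a multiple of the chunk size (wraps to 0 on size_t overflow). */
static size_t
demo_chunk_ceiling(size_t s) {
    return (s + DEMO_CHUNK - 1) & ~(DEMO_CHUNK - 1);
}

/* True when the existing chunk-rounded allocation can absorb the new size. */
static bool
demo_can_stay(size_t oldsize, size_t size, size_t extra) {
    return demo_chunk_ceiling(oldsize) >= demo_chunk_ceiling(size) &&
        demo_chunk_ceiling(oldsize) <= demo_chunk_ceiling(size + extra);
}

int
main(void) {
    size_t old = 5 * ((size_t)1 << 20);             /* rounds to 8 MiB  */
    printf("%d\n", demo_can_stay(old, 7 << 20, 0)); /* 1: still 8 MiB   */
    printf("%d\n", demo_can_stay(old, 9 << 20, 0)); /* 0: needs 12 MiB  */
    return 0;
}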
3922
deps/jemalloc/src/jemalloc.c
vendored
Normal file
File diff suppressed because it is too large
Load Diff
141
deps/jemalloc/src/jemalloc_cpp.cpp
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
#include <mutex>
|
||||
#include <new>
|
||||
|
||||
#define JEMALLOC_CPP_CPP_
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
// All operators in this file are exported.
|
||||
|
||||
// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
|
||||
// thunk?
|
||||
//
|
||||
// extern __typeof (sdallocx) sdallocx_int
|
||||
// __attribute ((alias ("sdallocx"),
|
||||
// visibility ("hidden")));
|
||||
//
|
||||
// ... but it needs to work with jemalloc namespaces.
|
||||
|
||||
void *operator new(std::size_t size);
|
||||
void *operator new[](std::size_t size);
|
||||
void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
|
||||
void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
|
||||
void operator delete(void *ptr) noexcept;
|
||||
void operator delete[](void *ptr) noexcept;
|
||||
void operator delete(void *ptr, const std::nothrow_t &) noexcept;
|
||||
void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
|
||||
|
||||
#if __cpp_sized_deallocation >= 201309
|
||||
/* C++14's sized-delete operators. */
|
||||
void operator delete(void *ptr, std::size_t size) noexcept;
|
||||
void operator delete[](void *ptr, std::size_t size) noexcept;
|
||||
#endif
|
||||
|
||||
JEMALLOC_NOINLINE
|
||||
static void *
|
||||
handleOOM(std::size_t size, bool nothrow) {
|
||||
void *ptr = nullptr;
|
||||
|
||||
while (ptr == nullptr) {
|
||||
std::new_handler handler;
|
||||
// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
|
||||
{
|
||||
static std::mutex mtx;
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
|
||||
handler = std::set_new_handler(nullptr);
|
||||
std::set_new_handler(handler);
|
||||
}
|
||||
if (handler == nullptr)
|
||||
break;
|
||||
|
||||
try {
|
||||
handler();
|
||||
} catch (const std::bad_alloc &) {
|
||||
break;
|
||||
}
|
||||
|
||||
ptr = je_malloc(size);
|
||||
}
|
||||
|
||||
if (ptr == nullptr && !nothrow)
|
||||
std::__throw_bad_alloc();
|
||||
return ptr;
|
||||
}
|
||||
|
||||
template <bool IsNoExcept>
|
||||
JEMALLOC_ALWAYS_INLINE
|
||||
void *
|
||||
newImpl(std::size_t size) noexcept(IsNoExcept) {
|
||||
void *ptr = je_malloc(size);
|
||||
if (likely(ptr != nullptr))
|
||||
return ptr;
|
||||
|
||||
return handleOOM(size, IsNoExcept);
|
||||
}
|
||||
|
||||
void *
|
||||
operator new(std::size_t size) {
|
||||
return newImpl<false>(size);
|
||||
}
|
||||
|
||||
void *
|
||||
operator new[](std::size_t size) {
|
||||
return newImpl<false>(size);
|
||||
}
|
||||
|
||||
void *
|
||||
operator new(std::size_t size, const std::nothrow_t &) noexcept {
|
||||
return newImpl<true>(size);
|
||||
}
|
||||
|
||||
void *
|
||||
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
|
||||
return newImpl<true>(size);
|
||||
}
|
||||
|
||||
void
|
||||
operator delete(void *ptr) noexcept {
|
||||
je_free(ptr);
|
||||
}
|
||||
|
||||
void
|
||||
operator delete[](void *ptr) noexcept {
|
||||
je_free(ptr);
|
||||
}
|
||||
|
||||
void
|
||||
operator delete(void *ptr, const std::nothrow_t &) noexcept {
|
||||
je_free(ptr);
|
||||
}
|
||||
|
||||
void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
|
||||
je_free(ptr);
|
||||
}
|
||||
|
||||
#if __cpp_sized_deallocation >= 201309
|
||||
|
||||
void
|
||||
operator delete(void *ptr, std::size_t size) noexcept {
|
||||
if (unlikely(ptr == nullptr)) {
|
||||
return;
|
||||
}
|
||||
je_sdallocx_noflags(ptr, size);
|
||||
}
|
||||
|
||||
void operator delete[](void *ptr, std::size_t size) noexcept {
|
||||
if (unlikely(ptr == nullptr)) {
|
||||
return;
|
||||
}
|
||||
je_sdallocx_noflags(ptr, size);
|
||||
}
|
||||
|
||||
#endif // __cpp_sized_deallocation
|
||||
395
deps/jemalloc/src/large.c
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
#define JEMALLOC_LARGE_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/extent_mmap.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
void *
|
||||
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
|
||||
assert(usize == sz_s2u(usize));
|
||||
|
||||
return large_palloc(tsdn, arena, usize, CACHELINE, zero);
|
||||
}
|
||||
|
||||
void *
|
||||
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
|
||||
bool zero) {
|
||||
size_t ausize;
|
||||
extent_t *extent;
|
||||
bool is_zeroed;
|
||||
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
|
||||
|
||||
assert(!tsdn_null(tsdn) || arena != NULL);
|
||||
|
||||
ausize = sz_sa2u(usize, alignment);
|
||||
if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (config_fill && unlikely(opt_zero)) {
|
||||
zero = true;
|
||||
}
|
||||
/*
|
||||
* Copy zero into is_zeroed and pass the copy when allocating the
|
||||
* extent, so that it is possible to make correct junk/zero fill
|
||||
* decisions below, even if is_zeroed ends up true when zero is false.
|
||||
*/
|
||||
is_zeroed = zero;
|
||||
if (likely(!tsdn_null(tsdn))) {
|
||||
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
|
||||
}
|
||||
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
|
||||
arena, usize, alignment, &is_zeroed)) == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* See comments in arena_bin_slabs_full_insert(). */
|
||||
if (!arena_is_auto(arena)) {
|
||||
/* Insert extent into large. */
|
||||
malloc_mutex_lock(tsdn, &arena->large_mtx);
|
||||
extent_list_append(&arena->large, extent);
|
||||
malloc_mutex_unlock(tsdn, &arena->large_mtx);
|
||||
}
|
||||
if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
|
||||
prof_idump(tsdn);
|
||||
}
|
||||
|
||||
if (zero) {
|
||||
assert(is_zeroed);
|
||||
} else if (config_fill && unlikely(opt_junk_alloc)) {
|
||||
memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
|
||||
extent_usize_get(extent));
|
||||
}
|
||||
|
||||
arena_decay_tick(tsdn, arena);
|
||||
return extent_addr_get(extent);
|
||||
}
|
||||
|
||||
static void
|
||||
large_dalloc_junk_impl(void *ptr, size_t size) {
|
||||
memset(ptr, JEMALLOC_FREE_JUNK, size);
|
||||
}
|
||||
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
|
||||
|
||||
static void
|
||||
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
|
||||
if (config_fill && have_dss && unlikely(opt_junk_free)) {
|
||||
/*
|
||||
* Only bother junk filling if the extent isn't about to be
|
||||
* unmapped.
|
||||
*/
|
||||
if (opt_retain || (have_dss && extent_in_dss(ptr))) {
|
||||
large_dalloc_junk(ptr, size);
|
||||
}
|
||||
}
|
||||
}
|
||||
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
|
||||
large_dalloc_maybe_junk_impl;
|
||||
|
||||
static bool
|
||||
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
|
||||
arena_t *arena = extent_arena_get(extent);
|
||||
size_t oldusize = extent_usize_get(extent);
|
||||
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
|
||||
size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
|
||||
|
||||
assert(oldusize > usize);
|
||||
|
||||
if (extent_hooks->split == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Split excess pages. */
|
||||
if (diff != 0) {
|
||||
extent_t *trail = extent_split_wrapper(tsdn, arena,
|
||||
&extent_hooks, extent, usize + sz_large_pad,
|
||||
sz_size2index(usize), false, diff, SC_NSIZES, false);
|
||||
if (trail == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (config_fill && unlikely(opt_junk_free)) {
|
||||
large_dalloc_maybe_junk(extent_addr_get(trail),
|
||||
extent_size_get(trail));
|
||||
}
|
||||
|
||||
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
|
||||
}
|
||||
|
||||
arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
|
||||
bool zero) {
|
||||
arena_t *arena = extent_arena_get(extent);
|
||||
size_t oldusize = extent_usize_get(extent);
|
||||
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
|
||||
size_t trailsize = usize - oldusize;
|
||||
|
||||
if (extent_hooks->merge == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (config_fill && unlikely(opt_zero)) {
|
||||
zero = true;
|
||||
}
|
||||
/*
|
||||
* Copy zero into is_zeroed_trail and pass the copy when allocating the
|
||||
* extent, so that it is possible to make correct junk/zero fill
|
||||
* decisions below, even if is_zeroed_trail ends up true when zero is
|
||||
* false.
|
||||
*/
|
||||
bool is_zeroed_trail = zero;
|
||||
bool commit = true;
|
||||
extent_t *trail;
|
||||
bool new_mapping;
|
||||
if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
|
||||
&arena->extents_dirty, extent_past_get(extent), trailsize, 0,
|
||||
CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
|
||||
|| (trail = extents_alloc(tsdn, arena, &extent_hooks,
|
||||
&arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
|
||||
CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
|
||||
if (config_stats) {
|
||||
new_mapping = false;
|
||||
}
|
||||
} else {
|
||||
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
|
||||
extent_past_get(extent), trailsize, 0, CACHELINE, false,
|
||||
SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
|
||||
return true;
|
||||
}
|
||||
if (config_stats) {
|
||||
new_mapping = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
|
||||
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
|
||||
return true;
|
||||
}
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
szind_t szind = sz_size2index(usize);
|
||||
extent_szind_set(extent, szind);
|
||||
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
|
||||
(uintptr_t)extent_addr_get(extent), szind, false);
|
||||
|
||||
if (config_stats && new_mapping) {
|
||||
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
|
||||
}
|
||||
|
||||
if (zero) {
|
||||
if (config_cache_oblivious) {
|
||||
/*
|
||||
* Zero the trailing bytes of the original allocation's
|
||||
* last page, since they are in an indeterminate state.
|
||||
* There will always be trailing bytes, because ptr's
|
||||
* offset from the beginning of the extent is a multiple
|
||||
* of CACHELINE in [0 .. PAGE).
|
||||
*/
|
||||
void *zbase = (void *)
|
||||
((uintptr_t)extent_addr_get(extent) + oldusize);
|
||||
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
|
||||
PAGE));
|
||||
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
|
||||
assert(nzero > 0);
|
||||
memset(zbase, 0, nzero);
|
||||
}
|
||||
assert(is_zeroed_trail);
|
||||
} else if (config_fill && unlikely(opt_junk_alloc)) {
|
||||
memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
|
||||
JEMALLOC_ALLOC_JUNK, usize - oldusize);
|
||||
}
|
||||
|
||||
arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
|
||||
size_t usize_max, bool zero) {
|
||||
size_t oldusize = extent_usize_get(extent);
|
||||
|
||||
/* The following should have been caught by callers. */
|
||||
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
|
||||
/* Both allocation sizes must be large to avoid a move. */
|
||||
assert(oldusize >= SC_LARGE_MINCLASS
|
||||
&& usize_max >= SC_LARGE_MINCLASS);
|
||||
|
||||
if (usize_max > oldusize) {
|
||||
/* Attempt to expand the allocation in-place. */
|
||||
if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
|
||||
zero)) {
|
||||
arena_decay_tick(tsdn, extent_arena_get(extent));
|
||||
return false;
|
||||
}
|
||||
/* Try again, this time with usize_min. */
|
||||
if (usize_min < usize_max && usize_min > oldusize &&
|
||||
large_ralloc_no_move_expand(tsdn, extent, usize_min,
|
||||
zero)) {
|
||||
arena_decay_tick(tsdn, extent_arena_get(extent));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Avoid moving the allocation if the existing extent size accommodates
|
||||
* the new size.
|
||||
*/
|
||||
if (oldusize >= usize_min && oldusize <= usize_max) {
|
||||
arena_decay_tick(tsdn, extent_arena_get(extent));
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Attempt to shrink the allocation in-place. */
|
||||
if (oldusize > usize_max) {
|
||||
if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
|
||||
arena_decay_tick(tsdn, extent_arena_get(extent));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void *
|
||||
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
||||
size_t alignment, bool zero) {
|
||||
if (alignment <= CACHELINE) {
|
||||
return large_malloc(tsdn, arena, usize, zero);
|
||||
}
|
||||
return large_palloc(tsdn, arena, usize, alignment, zero);
|
||||
}
|
||||
|
||||
void *
|
||||
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
|
||||
size_t alignment, bool zero, tcache_t *tcache,
|
||||
hook_ralloc_args_t *hook_args) {
|
||||
extent_t *extent = iealloc(tsdn, ptr);
|
||||
|
||||
size_t oldusize = extent_usize_get(extent);
|
||||
/* The following should have been caught by callers. */
|
||||
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
|
||||
/* Both allocation sizes must be large to avoid a move. */
|
||||
assert(oldusize >= SC_LARGE_MINCLASS
|
||||
&& usize >= SC_LARGE_MINCLASS);
|
||||
|
||||
/* Try to avoid moving the allocation. */
|
||||
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
|
||||
hook_invoke_expand(hook_args->is_realloc
|
||||
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
|
||||
usize, (uintptr_t)ptr, hook_args->args);
|
||||
return extent_addr_get(extent);
|
||||
}
|
||||
|
||||
/*
|
||||
* usize and old size are different enough that we need to use a
|
||||
* different size class. In that case, fall back to allocating new
|
||||
* space and copying.
|
||||
*/
|
||||
void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
|
||||
zero);
|
||||
if (ret == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hook_invoke_alloc(hook_args->is_realloc
|
||||
? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
|
||||
hook_args->args);
|
||||
hook_invoke_dalloc(hook_args->is_realloc
|
||||
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
|
||||
|
||||
size_t copysize = (usize < oldusize) ? usize : oldusize;
|
||||
memcpy(ret, extent_addr_get(extent), copysize);
|
||||
isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* junked_locked indicates whether the extent's data have been junk-filled, and
|
||||
* whether the arena's large_mtx is currently held.
|
||||
*/
|
||||
static void
|
||||
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
||||
bool junked_locked) {
|
||||
if (!junked_locked) {
|
||||
/* See comments in arena_bin_slabs_full_insert(). */
|
||||
if (!arena_is_auto(arena)) {
|
||||
malloc_mutex_lock(tsdn, &arena->large_mtx);
|
||||
extent_list_remove(&arena->large, extent);
|
||||
malloc_mutex_unlock(tsdn, &arena->large_mtx);
|
||||
}
|
||||
large_dalloc_maybe_junk(extent_addr_get(extent),
|
||||
extent_usize_get(extent));
|
||||
} else {
|
||||
/* Only hold the large_mtx if necessary. */
|
||||
if (!arena_is_auto(arena)) {
|
||||
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
|
||||
extent_list_remove(&arena->large, extent);
|
||||
}
|
||||
}
|
||||
arena_extent_dalloc_large_prep(tsdn, arena, extent);
|
||||
}
|
||||
|
||||
static void
|
||||
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
|
||||
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
|
||||
arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
|
||||
}
|
||||
|
||||
void
|
||||
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
|
||||
large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
|
||||
}
|
||||
|
||||
void
|
||||
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
|
||||
large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
|
||||
}
|
||||
|
||||
void
|
||||
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
|
||||
arena_t *arena = extent_arena_get(extent);
|
||||
large_dalloc_prep_impl(tsdn, arena, extent, false);
|
||||
large_dalloc_finish_impl(tsdn, arena, extent);
|
||||
arena_decay_tick(tsdn, arena);
|
||||
}
|
||||
|
||||
size_t
|
||||
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
|
||||
return extent_usize_get(extent);
|
||||
}
|
||||
|
||||
prof_tctx_t *
|
||||
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
|
||||
return extent_prof_tctx_get(extent);
|
||||
}
|
||||
|
||||
void
|
||||
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
|
||||
extent_prof_tctx_set(extent, tctx);
|
||||
}
|
||||
|
||||
void
|
||||
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
|
||||
large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
|
||||
}
|
||||
|
||||
nstime_t
|
||||
large_prof_alloc_time_get(const extent_t *extent) {
|
||||
return extent_prof_alloc_time_get(extent);
|
||||
}
|
||||
|
||||
void
|
||||
large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
|
||||
extent_prof_alloc_time_set(extent, t);
|
||||
}
|
||||
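In large_ralloc_no_move_expand above, the cache-oblivious zeroing step computes how many bytes remain between the end of the old usable size and the next page boundary. That arithmetic is self-contained; a small sketch with an assumed 4 KiB page and made-up demo_ names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE ((uintptr_t)4096)  /* assumed page size */

/* Bytes from zbase up to the next page boundary strictly after it. */
static size_t
demo_tail_to_page(uintptr_t zbase) {
    uintptr_t zpast = (zbase + DEMO_PAGE) & ~(DEMO_PAGE - 1);
    return (size_t)(zpast - zbase);
}

int
main(void) {
    /* An allocation whose old usable size ends 100 bytes into a page. */
    uintptr_t zbase = 0x10000 + 100;
    printf("%zu\n", demo_tail_to_page(zbase)); /* 3996 */
    return 0;
}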
78
deps/jemalloc/src/log.c
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/log.h"
|
||||
|
||||
char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
|
||||
atomic_b_t log_init_done = ATOMIC_INIT(false);
|
||||
|
||||
/*
|
||||
* Returns a pointer to the first character after the end of the current
* segment (either the '|' delimiter or the terminating '\0').
|
||||
*/
|
||||
static const char *
|
||||
log_var_extract_segment(const char* segment_begin) {
|
||||
const char *end;
|
||||
for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
|
||||
}
|
||||
return end;
|
||||
}
|
||||
|
||||
static bool
|
||||
log_var_matches_segment(const char *segment_begin, const char *segment_end,
|
||||
const char *log_var_begin, const char *log_var_end) {
|
||||
assert(segment_begin <= segment_end);
|
||||
assert(log_var_begin < log_var_end);
|
||||
|
||||
ptrdiff_t segment_len = segment_end - segment_begin;
|
||||
ptrdiff_t log_var_len = log_var_end - log_var_begin;
|
||||
/* The special '.' segment matches everything. */
|
||||
if (segment_len == 1 && *segment_begin == '.') {
|
||||
return true;
|
||||
}
|
||||
if (segment_len == log_var_len) {
|
||||
return strncmp(segment_begin, log_var_begin, segment_len) == 0;
|
||||
} else if (segment_len < log_var_len) {
|
||||
return strncmp(segment_begin, log_var_begin, segment_len) == 0
|
||||
&& log_var_begin[segment_len] == '.';
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned
|
||||
log_var_update_state(log_var_t *log_var) {
|
||||
const char *log_var_begin = log_var->name;
|
||||
const char *log_var_end = log_var->name + strlen(log_var->name);
|
||||
|
||||
/* Pointer to one before the beginning of the current segment. */
|
||||
const char *segment_begin = log_var_names;
|
||||
|
||||
/*
|
||||
* If log_init done is false, we haven't parsed the malloc conf yet. To
|
||||
* avoid log-spew, we default to not displaying anything.
|
||||
*/
|
||||
if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
|
||||
return LOG_INITIALIZED_NOT_ENABLED;
|
||||
}
|
||||
|
||||
while (true) {
|
||||
const char *segment_end = log_var_extract_segment(
|
||||
segment_begin);
|
||||
assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
|
||||
if (log_var_matches_segment(segment_begin, segment_end,
|
||||
log_var_begin, log_var_end)) {
|
||||
atomic_store_u(&log_var->state, LOG_ENABLED,
|
||||
ATOMIC_RELAXED);
|
||||
return LOG_ENABLED;
|
||||
}
|
||||
if (*segment_end == '\0') {
|
||||
/* Hit the end of the segment string with no match. */
|
||||
atomic_store_u(&log_var->state,
|
||||
LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
|
||||
return LOG_INITIALIZED_NOT_ENABLED;
|
||||
}
|
||||
/* Otherwise, skip the delimiter and continue. */
|
||||
segment_begin = segment_end + 1;
|
||||
}
|
||||
}
|
||||
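log_var_update_state above walks a '|'-separated filter list and enables a dotted log name when some segment is an exact match, a dot-terminated prefix, or the "." wildcard. Below is a standalone sketch of that matching rule with a hard-coded filter string in place of log_var_names; all demo_ names are invented for the example.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True if one filter segment [seg, seg_end) enables the dotted name. */
static bool
demo_segment_matches(const char *seg, const char *seg_end, const char *name) {
    size_t seg_len = (size_t)(seg_end - seg);
    size_t name_len = strlen(name);
    if (seg_len == 1 && *seg == '.') {
        return true;  /* wildcard */
    }
    if (seg_len == name_len) {
        return strncmp(seg, name, seg_len) == 0;
    }
    if (seg_len < name_len) {
        /* A prefix match must stop at a '.' boundary in the name. */
        return strncmp(seg, name, seg_len) == 0 && name[seg_len] == '.';
    }
    return false;
}

/* True if any '|'-separated segment of filter enables name. */
static bool
demo_log_enabled(const char *filter, const char *name) {
    const char *seg = filter;
    while (true) {
        const char *seg_end = seg;
        while (*seg_end != '\0' && *seg_end != '|') {
            seg_end++;
        }
        if (demo_segment_matches(seg, seg_end, name)) {
            return true;
        }
        if (*seg_end == '\0') {
            return false;
        }
        seg = seg_end + 1;
    }
}

int
main(void) {
    const char *filter = "core|extent.split";
    printf("%d\n", demo_log_enabled(filter, "core.init"));    /* 1 */
    printf("%d\n", demo_log_enabled(filter, "extent.merge")); /* 0 */
    return 0;
}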
675
deps/jemalloc/src/malloc_io.c
vendored
Normal file
@@ -0,0 +1,675 @@
|
||||
#define JEMALLOC_MALLOC_IO_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
#include "jemalloc/internal/util.h"
|
||||
|
||||
#ifdef assert
|
||||
# undef assert
|
||||
#endif
|
||||
#ifdef not_reached
|
||||
# undef not_reached
|
||||
#endif
|
||||
#ifdef not_implemented
|
||||
# undef not_implemented
|
||||
#endif
|
||||
#ifdef assert_not_implemented
|
||||
# undef assert_not_implemented
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Define simple versions of assertion macros that won't recurse in case
|
||||
* of assertion failures in malloc_*printf().
|
||||
*/
|
||||
#define assert(e) do { \
|
||||
if (config_debug && !(e)) { \
|
||||
malloc_write("<jemalloc>: Failed assertion\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define not_reached() do { \
|
||||
if (config_debug) { \
|
||||
malloc_write("<jemalloc>: Unreachable code reached\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
unreachable(); \
|
||||
} while (0)
|
||||
|
||||
#define not_implemented() do { \
|
||||
if (config_debug) { \
|
||||
malloc_write("<jemalloc>: Not implemented\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define assert_not_implemented(e) do { \
|
||||
if (unlikely(config_debug && !(e))) { \
|
||||
not_implemented(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void wrtmessage(void *cbopaque, const char *s);
|
||||
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
|
||||
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
|
||||
size_t *slen_p);
|
||||
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
|
||||
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
|
||||
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
|
||||
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
|
||||
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
|
||||
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
|
||||
size_t *slen_p);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
/* malloc_message() setup. */
|
||||
static void
|
||||
wrtmessage(void *cbopaque, const char *s) {
|
||||
malloc_write_fd(STDERR_FILENO, s, strlen(s));
|
||||
}
|
||||
|
||||
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
|
||||
|
||||
/*
|
||||
* Wrapper around malloc_message() that avoids the need for
|
||||
* je_malloc_message(...) throughout the code.
|
||||
*/
|
||||
void
|
||||
malloc_write(const char *s) {
|
||||
if (je_malloc_message != NULL) {
|
||||
je_malloc_message(NULL, s);
|
||||
} else {
|
||||
wrtmessage(NULL, s);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
|
||||
* provide a wrapper.
|
||||
*/
|
||||
int
|
||||
buferror(int err, char *buf, size_t buflen) {
|
||||
#ifdef _WIN32
|
||||
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
|
||||
(LPSTR)buf, (DWORD)buflen, NULL);
|
||||
return 0;
|
||||
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
|
||||
char *b = strerror_r(err, buf, buflen);
|
||||
if (b != buf) {
|
||||
strncpy(buf, b, buflen);
|
||||
buf[buflen-1] = '\0';
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
return strerror_r(err, buf, buflen);
|
||||
#endif
|
||||
}
|
||||
|
||||
uintmax_t
|
||||
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
|
||||
uintmax_t ret, digit;
|
||||
unsigned b;
|
||||
bool neg;
|
||||
const char *p, *ns;
|
||||
|
||||
p = nptr;
|
||||
if (base < 0 || base == 1 || base > 36) {
|
||||
ns = p;
|
||||
set_errno(EINVAL);
|
||||
ret = UINTMAX_MAX;
|
||||
goto label_return;
|
||||
}
|
||||
b = base;
|
||||
|
||||
/* Swallow leading whitespace and get sign, if any. */
|
||||
neg = false;
|
||||
while (true) {
|
||||
switch (*p) {
|
||||
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
|
||||
p++;
|
||||
break;
|
||||
case '-':
|
||||
neg = true;
|
||||
/* Fall through. */
|
||||
case '+':
|
||||
p++;
|
||||
/* Fall through. */
|
||||
default:
|
||||
goto label_prefix;
|
||||
}
|
||||
}
|
||||
|
||||
/* Get prefix, if any. */
|
||||
label_prefix:
|
||||
/*
|
||||
* Note where the first non-whitespace/sign character is so that it is
|
||||
* possible to tell whether any digits are consumed (e.g., " 0" vs.
|
||||
* " -x").
|
||||
*/
|
||||
ns = p;
|
||||
if (*p == '0') {
|
||||
switch (p[1]) {
|
||||
case '0': case '1': case '2': case '3': case '4': case '5':
|
||||
case '6': case '7':
|
||||
if (b == 0) {
|
||||
b = 8;
|
||||
}
|
||||
if (b == 8) {
|
||||
p++;
|
||||
}
|
||||
break;
|
||||
case 'X': case 'x':
|
||||
switch (p[2]) {
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9':
|
||||
case 'A': case 'B': case 'C': case 'D': case 'E':
|
||||
case 'F':
|
||||
case 'a': case 'b': case 'c': case 'd': case 'e':
|
||||
case 'f':
|
||||
if (b == 0) {
|
||||
b = 16;
|
||||
}
|
||||
if (b == 16) {
|
||||
p += 2;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
p++;
|
||||
ret = 0;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
if (b == 0) {
|
||||
b = 10;
|
||||
}
|
||||
|
||||
/* Convert. */
|
||||
ret = 0;
|
||||
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|
||||
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|
||||
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
|
||||
uintmax_t pret = ret;
|
||||
ret *= b;
|
||||
ret += digit;
|
||||
if (ret < pret) {
|
||||
/* Overflow. */
|
||||
set_errno(ERANGE);
|
||||
ret = UINTMAX_MAX;
|
||||
goto label_return;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
if (neg) {
|
||||
ret = (uintmax_t)(-((intmax_t)ret));
|
||||
}
|
||||
|
||||
if (p == ns) {
|
||||
/* No conversion performed. */
|
||||
set_errno(EINVAL);
|
||||
ret = UINTMAX_MAX;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
label_return:
|
||||
if (endptr != NULL) {
|
||||
if (p == ns) {
|
||||
/* No characters were converted. */
|
||||
*endptr = (char *)nptr;
|
||||
} else {
|
||||
*endptr = (char *)p;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
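The conversion loop in malloc_strtoumax above guards against overflow by checking whether the accumulated value wrapped after each multiply-and-add. The same pattern in isolation, as a decimal-only toy parser (an illustration with invented demo_ names, not the allocator's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Parse a non-negative decimal string; sets *overflow and returns
 * UINTMAX_MAX on wrap-around, mirroring the wrap check used above.
 */
static uintmax_t
demo_parse_dec(const char *s, bool *overflow) {
    uintmax_t ret = 0;
    *overflow = false;
    for (; *s >= '0' && *s <= '9'; s++) {
        uintmax_t prev = ret;
        ret = ret * 10 + (uintmax_t)(*s - '0');
        if (ret < prev) {
            *overflow = true;
            return UINTMAX_MAX;
        }
    }
    return ret;
}

int
main(void) {
    bool ovf;
    printf("%ju %d\n", demo_parse_dec("123456", &ovf), ovf);  /* 123456 0 */
    printf("%ju %d\n", demo_parse_dec(
        "99999999999999999999999999999999", &ovf), ovf);      /* max 1 */
    return 0;
}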
|
||||
static char *
|
||||
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
|
||||
unsigned i;
|
||||
|
||||
i = U2S_BUFSIZE - 1;
|
||||
s[i] = '\0';
|
||||
switch (base) {
|
||||
case 10:
|
||||
do {
|
||||
i--;
|
||||
s[i] = "0123456789"[x % (uint64_t)10];
|
||||
x /= (uint64_t)10;
|
||||
} while (x > 0);
|
||||
break;
|
||||
case 16: {
|
||||
const char *digits = (uppercase)
|
||||
? "0123456789ABCDEF"
|
||||
: "0123456789abcdef";
|
||||
|
||||
do {
|
||||
i--;
|
||||
s[i] = digits[x & 0xf];
|
||||
x >>= 4;
|
||||
} while (x > 0);
|
||||
break;
|
||||
} default: {
|
||||
const char *digits = (uppercase)
|
||||
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
: "0123456789abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
assert(base >= 2 && base <= 36);
|
||||
do {
|
||||
i--;
|
||||
s[i] = digits[x % (uint64_t)base];
|
||||
x /= (uint64_t)base;
|
||||
} while (x > 0);
|
||||
}}
|
||||
|
||||
*slen_p = U2S_BUFSIZE - 1 - i;
|
||||
return &s[i];
|
||||
}
|
||||
|
||||
static char *
|
||||
d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
|
||||
bool neg;
|
||||
|
||||
if ((neg = (x < 0))) {
|
||||
x = -x;
|
||||
}
|
||||
s = u2s(x, 10, false, s, slen_p);
|
||||
if (neg) {
|
||||
sign = '-';
|
||||
}
|
||||
switch (sign) {
|
||||
case '-':
|
||||
if (!neg) {
|
||||
break;
|
||||
}
|
||||
/* Fall through. */
|
||||
case ' ':
|
||||
case '+':
|
||||
s--;
|
||||
(*slen_p)++;
|
||||
*s = sign;
|
||||
break;
|
||||
default: not_reached();
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
static char *
|
||||
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
|
||||
s = u2s(x, 8, false, s, slen_p);
|
||||
if (alt_form && *s != '0') {
|
||||
s--;
|
||||
(*slen_p)++;
|
||||
*s = '0';
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
static char *
|
||||
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
|
||||
s = u2s(x, 16, uppercase, s, slen_p);
|
||||
if (alt_form) {
|
||||
s -= 2;
|
||||
(*slen_p) += 2;
|
||||
memcpy(s, uppercase ? "0X" : "0x", 2);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
size_t
|
||||
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
|
||||
size_t i;
|
||||
const char *f;
|
||||
|
||||
#define APPEND_C(c) do { \
|
||||
if (i < size) { \
|
||||
str[i] = (c); \
|
||||
} \
|
||||
i++; \
|
||||
} while (0)
|
||||
#define APPEND_S(s, slen) do { \
|
||||
if (i < size) { \
|
||||
size_t cpylen = (slen <= size - i) ? slen : size - i; \
|
||||
memcpy(&str[i], s, cpylen); \
|
||||
} \
|
||||
i += slen; \
|
||||
} while (0)
|
||||
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
|
||||
/* Left padding. */ \
|
||||
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
|
||||
(size_t)width - slen : 0); \
|
||||
if (!left_justify && pad_len != 0) { \
|
||||
size_t j; \
|
||||
for (j = 0; j < pad_len; j++) { \
|
||||
APPEND_C(' '); \
|
||||
} \
|
||||
} \
|
||||
/* Value. */ \
|
||||
APPEND_S(s, slen); \
|
||||
/* Right padding. */ \
|
||||
if (left_justify && pad_len != 0) { \
|
||||
size_t j; \
|
||||
for (j = 0; j < pad_len; j++) { \
|
||||
APPEND_C(' '); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
#define GET_ARG_NUMERIC(val, len) do { \
|
||||
switch ((unsigned char)len) { \
|
||||
case '?': \
|
||||
val = va_arg(ap, int); \
|
||||
break; \
|
||||
case '?' | 0x80: \
|
||||
val = va_arg(ap, unsigned int); \
|
||||
break; \
|
||||
case 'l': \
|
||||
val = va_arg(ap, long); \
|
||||
break; \
|
||||
case 'l' | 0x80: \
|
||||
val = va_arg(ap, unsigned long); \
|
||||
break; \
|
||||
case 'q': \
|
||||
val = va_arg(ap, long long); \
|
||||
break; \
|
||||
case 'q' | 0x80: \
|
||||
val = va_arg(ap, unsigned long long); \
|
||||
break; \
|
||||
case 'j': \
|
||||
val = va_arg(ap, intmax_t); \
|
||||
break; \
|
||||
case 'j' | 0x80: \
|
||||
val = va_arg(ap, uintmax_t); \
|
||||
break; \
|
||||
case 't': \
|
||||
val = va_arg(ap, ptrdiff_t); \
|
||||
break; \
|
||||
case 'z': \
|
||||
val = va_arg(ap, ssize_t); \
|
||||
break; \
|
||||
case 'z' | 0x80: \
|
||||
val = va_arg(ap, size_t); \
|
||||
break; \
|
||||
case 'p': /* Synthetic; used for %p. */ \
|
||||
val = va_arg(ap, uintptr_t); \
|
||||
break; \
|
||||
default: \
|
||||
not_reached(); \
|
||||
val = 0; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
i = 0;
|
||||
f = format;
|
||||
while (true) {
|
||||
switch (*f) {
|
||||
case '\0': goto label_out;
|
||||
case '%': {
|
||||
bool alt_form = false;
|
||||
bool left_justify = false;
|
||||
bool plus_space = false;
|
||||
bool plus_plus = false;
|
||||
int prec = -1;
|
||||
int width = -1;
|
||||
unsigned char len = '?';
|
||||
char *s;
|
||||
size_t slen;
|
||||
|
||||
f++;
|
||||
/* Flags. */
|
||||
while (true) {
|
||||
switch (*f) {
|
||||
case '#':
|
||||
assert(!alt_form);
|
||||
alt_form = true;
|
||||
break;
|
||||
case '-':
|
||||
assert(!left_justify);
|
||||
left_justify = true;
|
||||
break;
|
||||
case ' ':
|
||||
assert(!plus_space);
|
||||
plus_space = true;
|
||||
break;
|
||||
case '+':
|
||||
assert(!plus_plus);
|
||||
plus_plus = true;
|
||||
break;
|
||||
default: goto label_width;
|
||||
}
|
||||
f++;
|
||||
}
|
||||
/* Width. */
|
||||
label_width:
|
||||
switch (*f) {
|
||||
case '*':
|
||||
width = va_arg(ap, int);
|
||||
f++;
|
||||
if (width < 0) {
|
||||
left_justify = true;
|
||||
width = -width;
|
||||
}
|
||||
break;
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9': {
|
||||
uintmax_t uwidth;
|
||||
set_errno(0);
|
||||
uwidth = malloc_strtoumax(f, (char **)&f, 10);
|
||||
assert(uwidth != UINTMAX_MAX || get_errno() !=
|
||||
ERANGE);
|
||||
width = (int)uwidth;
|
||||
break;
|
||||
} default:
|
||||
break;
|
||||
}
|
||||
/* Width/precision separator. */
|
||||
if (*f == '.') {
|
||||
f++;
|
||||
} else {
|
||||
goto label_length;
|
||||
}
|
||||
/* Precision. */
|
||||
switch (*f) {
|
||||
case '*':
|
||||
prec = va_arg(ap, int);
|
||||
f++;
|
||||
break;
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9': {
|
||||
uintmax_t uprec;
|
||||
set_errno(0);
|
||||
uprec = malloc_strtoumax(f, (char **)&f, 10);
|
||||
assert(uprec != UINTMAX_MAX || get_errno() !=
|
||||
ERANGE);
|
||||
prec = (int)uprec;
|
||||
break;
|
||||
}
|
||||
default: break;
|
||||
}
|
||||
/* Length. */
|
||||
label_length:
|
||||
switch (*f) {
|
||||
case 'l':
|
||||
f++;
|
||||
if (*f == 'l') {
|
||||
len = 'q';
|
||||
f++;
|
||||
} else {
|
||||
len = 'l';
|
||||
}
|
||||
break;
|
||||
case 'q': case 'j': case 't': case 'z':
|
||||
len = *f;
|
||||
f++;
|
||||
break;
|
||||
default: break;
|
||||
}
|
||||
/* Conversion specifier. */
|
||||
switch (*f) {
|
||||
case '%':
|
||||
/* %% */
|
||||
APPEND_C(*f);
|
||||
f++;
|
||||
break;
|
||||
case 'd': case 'i': {
|
||||
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
|
||||
char buf[D2S_BUFSIZE];
|
||||
|
||||
GET_ARG_NUMERIC(val, len);
|
||||
s = d2s(val, (plus_plus ? '+' : (plus_space ?
|
||||
' ' : '-')), buf, &slen);
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} case 'o': {
|
||||
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
|
||||
char buf[O2S_BUFSIZE];
|
||||
|
||||
GET_ARG_NUMERIC(val, len | 0x80);
|
||||
s = o2s(val, alt_form, buf, &slen);
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} case 'u': {
|
||||
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
|
||||
char buf[U2S_BUFSIZE];
|
||||
|
||||
GET_ARG_NUMERIC(val, len | 0x80);
|
||||
s = u2s(val, 10, false, buf, &slen);
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} case 'x': case 'X': {
|
||||
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
|
||||
char buf[X2S_BUFSIZE];
|
||||
|
||||
GET_ARG_NUMERIC(val, len | 0x80);
|
||||
s = x2s(val, alt_form, *f == 'X', buf, &slen);
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} case 'c': {
|
||||
unsigned char val;
|
||||
char buf[2];
|
||||
|
||||
assert(len == '?' || len == 'l');
|
||||
assert_not_implemented(len != 'l');
|
||||
val = va_arg(ap, int);
|
||||
buf[0] = val;
|
||||
buf[1] = '\0';
|
||||
APPEND_PADDED_S(buf, 1, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} case 's':
|
||||
assert(len == '?' || len == 'l');
|
||||
assert_not_implemented(len != 'l');
|
||||
s = va_arg(ap, char *);
|
||||
slen = (prec < 0) ? strlen(s) : (size_t)prec;
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
case 'p': {
|
||||
uintmax_t val;
|
||||
char buf[X2S_BUFSIZE];
|
||||
|
||||
GET_ARG_NUMERIC(val, 'p');
|
||||
s = x2s(val, true, false, buf, &slen);
|
||||
APPEND_PADDED_S(s, slen, width, left_justify);
|
||||
f++;
|
||||
break;
|
||||
} default: not_reached();
|
||||
}
|
||||
break;
|
||||
} default: {
|
||||
APPEND_C(*f);
|
||||
f++;
|
||||
break;
|
||||
}}
|
||||
}
|
||||
label_out:
|
||||
if (i < size) {
|
||||
str[i] = '\0';
|
||||
} else {
|
||||
str[size - 1] = '\0';
|
||||
}
|
||||
|
||||
#undef APPEND_C
|
||||
#undef APPEND_S
|
||||
#undef APPEND_PADDED_S
|
||||
#undef GET_ARG_NUMERIC
|
||||
return i;
|
||||
}
|
||||
|
||||
JEMALLOC_FORMAT_PRINTF(3, 4)
|
||||
size_t
|
||||
malloc_snprintf(char *str, size_t size, const char *format, ...) {
|
||||
size_t ret;
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
ret = malloc_vsnprintf(str, size, format, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, va_list ap) {
|
||||
char buf[MALLOC_PRINTF_BUFSIZE];
|
||||
|
||||
if (write_cb == NULL) {
|
||||
/*
|
||||
* The caller did not provide an alternate write_cb callback
|
||||
* function, so use the default one. malloc_write() is an
|
||||
* inline function, so use malloc_message() directly here.
|
||||
*/
|
||||
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
|
||||
wrtmessage;
|
||||
}
|
||||
|
||||
malloc_vsnprintf(buf, sizeof(buf), format, ap);
|
||||
write_cb(cbopaque, buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Print to a callback function in such a way as to (hopefully) avoid memory
|
||||
* allocation.
|
||||
*/
|
||||
JEMALLOC_FORMAT_PRINTF(3, 4)
|
||||
void
|
||||
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *format, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(write_cb, cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
/* Print to stderr in such a way as to avoid memory allocation. */
|
||||
JEMALLOC_FORMAT_PRINTF(1, 2)
|
||||
void
|
||||
malloc_printf(const char *format, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(NULL, NULL, format, ap);
|
||||
va_end(ap);
|
||||
}

/*
* Restore normal assertion macros, in order to make it possible to compile all
* C files as a single concatenation.
*/
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#include "jemalloc/internal/assert.h"
2
deps/jemalloc/src/mb.c
vendored
Normal file
2
deps/jemalloc/src/mb.c
vendored
Normal file
@@ -0,0 +1,2 @@
#define JEMALLOC_MB_C_
#include "jemalloc/internal/jemalloc_internal.h"
223
deps/jemalloc/src/mutex.c
vendored
Normal file
223
deps/jemalloc/src/mutex.c
vendored
Normal file
@@ -0,0 +1,223 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg) {
return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif

void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data;
nstime_t before = NSTIME_ZERO_INITIALIZER;

if (ncpus == 1) {
goto label_spin_done;
}

int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
do {
spin_cpu_spinwait();
if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
&& !malloc_mutex_trylock_final(mutex)) {
data->n_spin_acquired++;
return;
}
} while (cnt++ < max_cnt);

if (!config_stats) {
/* Only spin is useful when stats is off. */
malloc_mutex_lock_final(mutex);
return;
}
label_spin_done:
nstime_update(&before);
/* Copy before to after to avoid clock skews. */
nstime_t after;
nstime_copy(&after, &before);
uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
ATOMIC_RELAXED) + 1;
/* One last try as above two calls may take quite some cycles. */
if (!malloc_mutex_trylock_final(mutex)) {
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
data->n_spin_acquired++;
return;
}

/* True slow path. */
malloc_mutex_lock_final(mutex);
/* Update more slow-path only counters. */
atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
nstime_update(&after);

nstime_t delta;
nstime_copy(&delta, &after);
nstime_subtract(&delta, &before);

data->n_wait_times++;
nstime_add(&data->tot_wait_time, &delta);
if (nstime_compare(&data->max_wait_time, &delta) < 0) {
nstime_copy(&data->max_wait_time, &delta);
}
if (n_thds > data->max_n_thds) {
data->max_n_thds = n_thds;
}
}

static void
mutex_prof_data_init(mutex_prof_data_t *data) {
memset(data, 0, sizeof(mutex_prof_data_t));
nstime_init(&data->max_wait_time, 0);
nstime_init(&data->tot_wait_time, 0);
data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_assert_owner(tsdn, mutex);
mutex_prof_data_init(&mutex->prof_data);
}

static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
const witness_t *witness2, void *mutex2) {
assert(mutex1 != NULL);
assert(mutex2 != NULL);
uintptr_t mu1int = (uintptr_t)mutex1;
uintptr_t mu2int = (uintptr_t)mutex2;
if (mu1int < mu2int) {
return -1;
} else if (mu1int == mu2int) {
return 0;
} else {
return 1;
}
}

bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT)) {
return true;
}
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
bootstrap_calloc) != 0) {
return true;
}
}
#else
pthread_mutexattr_t attr;

if (pthread_mutexattr_init(&attr) != 0) {
return true;
}
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return true;
}
pthread_mutexattr_destroy(&attr);
#endif
if (config_debug) {
mutex->lock_order = lock_order;
if (lock_order == malloc_mutex_address_ordered) {
witness_init(&mutex->witness, name, rank,
mutex_addr_comp, mutex);
} else {
witness_init(&mutex->witness, name, rank, NULL, NULL);
}
}
return false;
}

void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort) {
abort();
}
}
#endif
}

bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0) {
return true;
}
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return false;
}
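
/*
 * Illustrative sketch, not part of the vendored source: the init/lock/unlock
 * cycle for the mutex wrapper defined above. The mutex name, the
 * WITNESS_RANK_OMIT rank and the function name are example choices made here,
 * not values the library prescribes.
 */
static malloc_mutex_t example_mtx;

static bool
example_mutex_usage(tsdn_t *tsdn) {
	if (malloc_mutex_init(&example_mtx, "example_mtx", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive)) {
		return true;	/* Propagate initialization failure. */
	}
	malloc_mutex_lock(tsdn, &example_mtx);
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
	return false;
}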
18
deps/jemalloc/src/mutex_pool.c
vendored
Normal file
18
deps/jemalloc/src/mutex_pool.c
vendored
Normal file
@@ -0,0 +1,18 @@
#define JEMALLOC_MUTEX_POOL_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

bool
mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
if (malloc_mutex_init(&pool->mutexes[i], name, rank,
malloc_mutex_address_ordered)) {
return true;
}
}
return false;
}
170
deps/jemalloc/src/nstime.c
vendored
Normal file
170
deps/jemalloc/src/nstime.c
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/nstime.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
#define BILLION UINT64_C(1000000000)
|
||||
#define MILLION UINT64_C(1000000)
|
||||
|
||||
void
|
||||
nstime_init(nstime_t *time, uint64_t ns) {
|
||||
time->ns = ns;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
|
||||
time->ns = sec * BILLION + nsec;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
nstime_ns(const nstime_t *time) {
|
||||
return time->ns;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
nstime_msec(const nstime_t *time) {
|
||||
return time->ns / MILLION;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
nstime_sec(const nstime_t *time) {
|
||||
return time->ns / BILLION;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
nstime_nsec(const nstime_t *time) {
|
||||
return time->ns % BILLION;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_copy(nstime_t *time, const nstime_t *source) {
|
||||
*time = *source;
|
||||
}
|
||||
|
||||
int
|
||||
nstime_compare(const nstime_t *a, const nstime_t *b) {
|
||||
return (a->ns > b->ns) - (a->ns < b->ns);
|
||||
}
|
||||
|
||||
void
|
||||
nstime_add(nstime_t *time, const nstime_t *addend) {
|
||||
assert(UINT64_MAX - time->ns >= addend->ns);
|
||||
|
||||
time->ns += addend->ns;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_iadd(nstime_t *time, uint64_t addend) {
|
||||
assert(UINT64_MAX - time->ns >= addend);
|
||||
|
||||
time->ns += addend;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
|
||||
assert(nstime_compare(time, subtrahend) >= 0);
|
||||
|
||||
time->ns -= subtrahend->ns;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
|
||||
assert(time->ns >= subtrahend);
|
||||
|
||||
time->ns -= subtrahend;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
|
||||
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
|
||||
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
|
||||
|
||||
time->ns *= multiplier;
|
||||
}
|
||||
|
||||
void
|
||||
nstime_idivide(nstime_t *time, uint64_t divisor) {
|
||||
assert(divisor != 0);
|
||||
|
||||
time->ns /= divisor;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
|
||||
assert(divisor->ns != 0);
|
||||
|
||||
return time->ns / divisor->ns;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
# define NSTIME_MONOTONIC true
|
||||
static void
|
||||
nstime_get(nstime_t *time) {
|
||||
FILETIME ft;
|
||||
uint64_t ticks_100ns;
|
||||
|
||||
GetSystemTimeAsFileTime(&ft);
|
||||
ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
|
||||
nstime_init(time, ticks_100ns * 100);
|
||||
}
|
||||
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
|
||||
# define NSTIME_MONOTONIC true
|
||||
static void
|
||||
nstime_get(nstime_t *time) {
|
||||
struct timespec ts;
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
|
||||
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
|
||||
}
|
||||
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
|
||||
# define NSTIME_MONOTONIC true
|
||||
static void
|
||||
nstime_get(nstime_t *time) {
|
||||
struct timespec ts;
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &ts);
|
||||
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
|
||||
}
|
||||
#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
|
||||
# define NSTIME_MONOTONIC true
|
||||
static void
|
||||
nstime_get(nstime_t *time) {
|
||||
nstime_init(time, mach_absolute_time());
|
||||
}
|
||||
#else
|
||||
# define NSTIME_MONOTONIC false
|
||||
static void
|
||||
nstime_get(nstime_t *time) {
|
||||
struct timeval tv;
|
||||
|
||||
gettimeofday(&tv, NULL);
|
||||
nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool
|
||||
nstime_monotonic_impl(void) {
|
||||
return NSTIME_MONOTONIC;
|
||||
#undef NSTIME_MONOTONIC
|
||||
}
|
||||
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
|
||||
|
||||
static bool
|
||||
nstime_update_impl(nstime_t *time) {
|
||||
nstime_t old_time;
|
||||
|
||||
nstime_copy(&old_time, time);
|
||||
nstime_get(time);
|
||||
|
||||
/* Handle non-monotonic clocks. */
|
||||
if (unlikely(nstime_compare(&old_time, time) > 0)) {
|
||||
nstime_copy(time, &old_time);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
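
/*
 * Illustrative sketch, not part of the vendored source: timing an interval
 * with the nstime helpers above, in the same style malloc_mutex_lock_slow()
 * uses for its wait-time accounting. The function name is hypothetical.
 */
static uint64_t
example_elapsed_ns(void) {
	nstime_t before = NSTIME_ZERO_INITIALIZER;
	nstime_t after;

	nstime_update(&before);		/* Sample the clock. */
	/* ... work being measured ... */
	nstime_copy(&after, &before);	/* Guard against non-monotonic clocks. */
	nstime_update(&after);
	nstime_subtract(&after, &before);
	return nstime_ns(&after);
}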
649
deps/jemalloc/src/pages.c
vendored
Normal file
649
deps/jemalloc/src/pages.c
vendored
Normal file
@@ -0,0 +1,649 @@
|
||||
#define JEMALLOC_PAGES_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/pages.h"
|
||||
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/malloc_io.h"
|
||||
|
||||
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
|
||||
#include <sys/sysctl.h>
|
||||
#ifdef __FreeBSD__
|
||||
#include <vm/vm_param.h>
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
|
||||
static size_t os_page;
|
||||
|
||||
#ifndef _WIN32
|
||||
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
|
||||
# define PAGES_PROT_DECOMMIT (PROT_NONE)
|
||||
static int mmap_flags;
|
||||
#endif
|
||||
static bool os_overcommits;
|
||||
|
||||
const char *thp_mode_names[] = {
|
||||
"default",
|
||||
"always",
|
||||
"never",
|
||||
"not supported"
|
||||
};
|
||||
thp_mode_t opt_thp = THP_MODE_DEFAULT;
|
||||
thp_mode_t init_system_thp_mode;
|
||||
|
||||
/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
|
||||
static bool pages_can_purge_lazy_runtime = true;
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Function prototypes for static functions that are referenced prior to
|
||||
* definition.
|
||||
*/
|
||||
|
||||
static void os_pages_unmap(void *addr, size_t size);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
|
||||
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
|
||||
assert(ALIGNMENT_CEILING(size, os_page) == size);
|
||||
assert(size != 0);
|
||||
|
||||
if (os_overcommits) {
|
||||
*commit = true;
|
||||
}
|
||||
|
||||
void *ret;
|
||||
#ifdef _WIN32
|
||||
/*
|
||||
* If VirtualAlloc can't allocate at the given address when one is
|
||||
* given, it fails and returns NULL.
|
||||
*/
|
||||
ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
|
||||
PAGE_READWRITE);
|
||||
#else
|
||||
/*
|
||||
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
||||
* of existing mappings, and we only want to create new mappings.
|
||||
*/
|
||||
{
|
||||
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
|
||||
|
||||
ret = mmap(addr, size, prot, mmap_flags, -1, 0);
|
||||
}
|
||||
assert(ret != NULL);
|
||||
|
||||
if (ret == MAP_FAILED) {
|
||||
ret = NULL;
|
||||
} else if (addr != NULL && ret != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right place.
|
||||
*/
|
||||
os_pages_unmap(ret, size);
|
||||
ret = NULL;
|
||||
}
|
||||
#endif
|
||||
assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
|
||||
ret == addr));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void *
|
||||
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
|
||||
bool *commit) {
|
||||
void *ret = (void *)((uintptr_t)addr + leadsize);
|
||||
|
||||
assert(alloc_size >= leadsize + size);
|
||||
#ifdef _WIN32
|
||||
os_pages_unmap(addr, alloc_size);
|
||||
void *new_addr = os_pages_map(ret, size, PAGE, commit);
|
||||
if (new_addr == ret) {
|
||||
return ret;
|
||||
}
|
||||
if (new_addr != NULL) {
|
||||
os_pages_unmap(new_addr, size);
|
||||
}
|
||||
return NULL;
|
||||
#else
|
||||
size_t trailsize = alloc_size - leadsize - size;
|
||||
|
||||
if (leadsize != 0) {
|
||||
os_pages_unmap(addr, leadsize);
|
||||
}
|
||||
if (trailsize != 0) {
|
||||
os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
||||
}
|
||||
return ret;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
os_pages_unmap(void *addr, size_t size) {
|
||||
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
|
||||
assert(ALIGNMENT_CEILING(size, os_page) == size);
|
||||
|
||||
#ifdef _WIN32
|
||||
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
||||
#else
|
||||
if (munmap(addr, size) == -1)
|
||||
#endif
|
||||
{
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in "
|
||||
#ifdef _WIN32
|
||||
"VirtualFree"
|
||||
#else
|
||||
"munmap"
|
||||
#endif
|
||||
"(): %s\n", buf);
|
||||
if (opt_abort) {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
pages_map_slow(size_t size, size_t alignment, bool *commit) {
|
||||
size_t alloc_size = size + alignment - os_page;
|
||||
/* Beware size_t wrap-around. */
|
||||
if (alloc_size < size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *ret;
|
||||
do {
|
||||
void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
|
||||
if (pages == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
|
||||
- (uintptr_t)pages;
|
||||
ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
|
||||
} while (ret == NULL);
|
||||
|
||||
assert(ret != NULL);
|
||||
assert(PAGE_ADDR2BASE(ret) == ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void *
|
||||
pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
|
||||
assert(alignment >= PAGE);
|
||||
assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
|
||||
|
||||
#if defined(__FreeBSD__) && defined(MAP_EXCL)
|
||||
/*
|
||||
* FreeBSD has mechanisms both to mmap at specific address without
|
||||
* touching existing mappings, and to mmap with specific alignment.
|
||||
*/
|
||||
{
|
||||
if (os_overcommits) {
|
||||
*commit = true;
|
||||
}
|
||||
|
||||
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
|
||||
int flags = mmap_flags;
|
||||
|
||||
if (addr != NULL) {
|
||||
flags |= MAP_FIXED | MAP_EXCL;
|
||||
} else {
|
||||
unsigned alignment_bits = ffs_zu(alignment);
|
||||
assert(alignment_bits > 1);
|
||||
flags |= MAP_ALIGNED(alignment_bits - 1);
|
||||
}
|
||||
|
||||
void *ret = mmap(addr, size, prot, flags, -1, 0);
|
||||
if (ret == MAP_FAILED) {
|
||||
ret = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* Ideally, there would be a way to specify alignment to mmap() (like
|
||||
* NetBSD has), but in the absence of such a feature, we have to work
|
||||
* hard to efficiently create aligned mappings. The reliable, but
|
||||
* slow method is to create a mapping that is over-sized, then trim the
|
||||
* excess. However, that always results in one or two calls to
|
||||
* os_pages_unmap(), and it can leave holes in the process's virtual
|
||||
* memory map if memory grows downward.
|
||||
*
|
||||
* Optimistically try mapping precisely the right amount before falling
|
||||
* back to the slow method, with the expectation that the optimistic
|
||||
* approach works most of the time.
|
||||
*/
|
||||
|
||||
void *ret = os_pages_map(addr, size, os_page, commit);
|
||||
if (ret == NULL || ret == addr) {
|
||||
return ret;
|
||||
}
|
||||
assert(addr == NULL);
|
||||
if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
|
||||
os_pages_unmap(ret, size);
|
||||
return pages_map_slow(size, alignment, commit);
|
||||
}
|
||||
|
||||
assert(PAGE_ADDR2BASE(ret) == ret);
|
||||
return ret;
|
||||
}
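
/*
 * Illustrative sketch, not part of the vendored source: the lead-size
 * arithmetic behind the "over-allocate, then trim" fallback described above,
 * written with plain integer operations. It assumes alignment is a power of
 * two, mirroring the rounding that ALIGNMENT_CEILING() performs in the
 * internal headers; the function name is hypothetical.
 */
static size_t
example_lead_size(uintptr_t addr, size_t alignment) {
	/* Round addr up to the next multiple of alignment... */
	uintptr_t aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
	/* ...and the gap is what os_pages_trim() unmaps from the front. */
	return (size_t)(aligned - addr);
}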
|
||||
|
||||
void
|
||||
pages_unmap(void *addr, size_t size) {
|
||||
assert(PAGE_ADDR2BASE(addr) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
|
||||
os_pages_unmap(addr, size);
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_commit_impl(void *addr, size_t size, bool commit) {
|
||||
assert(PAGE_ADDR2BASE(addr) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
|
||||
if (os_overcommits) {
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
|
||||
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
|
||||
#else
|
||||
{
|
||||
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
|
||||
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
|
||||
-1, 0);
|
||||
if (result == MAP_FAILED) {
|
||||
return true;
|
||||
}
|
||||
if (result != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right
|
||||
* place.
|
||||
*/
|
||||
os_pages_unmap(result, size);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_commit(void *addr, size_t size) {
|
||||
return pages_commit_impl(addr, size, true);
|
||||
}
|
||||
|
||||
bool
|
||||
pages_decommit(void *addr, size_t size) {
|
||||
return pages_commit_impl(addr, size, false);
|
||||
}
|
||||
|
||||
bool
|
||||
pages_purge_lazy(void *addr, size_t size) {
|
||||
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
|
||||
if (!pages_can_purge_lazy) {
|
||||
return true;
|
||||
}
|
||||
if (!pages_can_purge_lazy_runtime) {
|
||||
/*
|
||||
* Built with lazy purge enabled, but detected it was not
|
||||
* supported on the current system.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
|
||||
return false;
|
||||
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||
return (madvise(addr, size,
|
||||
# ifdef MADV_FREE
|
||||
MADV_FREE
|
||||
# else
|
||||
JEMALLOC_MADV_FREE
|
||||
# endif
|
||||
) != 0);
|
||||
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
|
||||
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
|
||||
return (madvise(addr, size, MADV_DONTNEED) != 0);
|
||||
#else
|
||||
not_reached();
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_purge_forced(void *addr, size_t size) {
|
||||
assert(PAGE_ADDR2BASE(addr) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
|
||||
if (!pages_can_purge_forced) {
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
|
||||
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
|
||||
return (madvise(addr, size, MADV_DONTNEED) != 0);
|
||||
#elif defined(JEMALLOC_MAPS_COALESCE)
|
||||
/* Try to overlay a new demand-zeroed mapping. */
|
||||
return pages_commit(addr, size);
|
||||
#else
|
||||
not_reached();
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_huge_impl(void *addr, size_t size, bool aligned) {
|
||||
if (aligned) {
|
||||
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
|
||||
assert(HUGEPAGE_CEILING(size) == size);
|
||||
}
|
||||
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_huge(void *addr, size_t size) {
|
||||
return pages_huge_impl(addr, size, true);
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_huge_unaligned(void *addr, size_t size) {
|
||||
return pages_huge_impl(addr, size, false);
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_nohuge_impl(void *addr, size_t size, bool aligned) {
|
||||
if (aligned) {
|
||||
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
|
||||
assert(HUGEPAGE_CEILING(size) == size);
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
|
||||
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_nohuge(void *addr, size_t size) {
|
||||
return pages_nohuge_impl(addr, size, true);
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_nohuge_unaligned(void *addr, size_t size) {
|
||||
return pages_nohuge_impl(addr, size, false);
|
||||
}
|
||||
|
||||
bool
|
||||
pages_dontdump(void *addr, size_t size) {
|
||||
assert(PAGE_ADDR2BASE(addr) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
#ifdef JEMALLOC_MADVISE_DONTDUMP
|
||||
return madvise(addr, size, MADV_DONTDUMP) != 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_dodump(void *addr, size_t size) {
|
||||
assert(PAGE_ADDR2BASE(addr) == addr);
|
||||
assert(PAGE_CEILING(size) == size);
|
||||
#ifdef JEMALLOC_MADVISE_DONTDUMP
|
||||
return madvise(addr, size, MADV_DODUMP) != 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
static size_t
|
||||
os_page_detect(void) {
|
||||
#ifdef _WIN32
|
||||
SYSTEM_INFO si;
|
||||
GetSystemInfo(&si);
|
||||
return si.dwPageSize;
|
||||
#elif defined(__FreeBSD__)
|
||||
/*
|
||||
* This returns the value obtained from
|
||||
* the auxv vector, avoiding a syscall.
|
||||
*/
|
||||
return getpagesize();
|
||||
#else
|
||||
long result = sysconf(_SC_PAGESIZE);
|
||||
if (result == -1) {
|
||||
return LG_PAGE;
|
||||
}
|
||||
return (size_t)result;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
|
||||
static bool
|
||||
os_overcommits_sysctl(void) {
|
||||
int vm_overcommit;
|
||||
size_t sz;
|
||||
|
||||
sz = sizeof(vm_overcommit);
|
||||
#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
|
||||
int mib[2];
|
||||
|
||||
mib[0] = CTL_VM;
|
||||
mib[1] = VM_OVERCOMMIT;
|
||||
if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
|
||||
return false; /* Error. */
|
||||
}
|
||||
#else
|
||||
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
|
||||
return false; /* Error. */
|
||||
}
|
||||
#endif
|
||||
|
||||
return ((vm_overcommit & 0x3) == 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
|
||||
/*
|
||||
* Use syscall(2) rather than {open,read,close}(2) when possible to avoid
|
||||
* reentry during bootstrapping if another library has interposed system call
|
||||
* wrappers.
|
||||
*/
|
||||
static bool
|
||||
os_overcommits_proc(void) {
|
||||
int fd;
|
||||
char buf[1];
|
||||
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
|
||||
#if defined(O_CLOEXEC)
|
||||
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
|
||||
O_CLOEXEC);
|
||||
#else
|
||||
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
|
||||
if (fd != -1) {
|
||||
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
|
||||
}
|
||||
#endif
|
||||
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
|
||||
#if defined(O_CLOEXEC)
|
||||
fd = (int)syscall(SYS_openat,
|
||||
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
|
||||
#else
|
||||
fd = (int)syscall(SYS_openat,
|
||||
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
|
||||
if (fd != -1) {
|
||||
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
#if defined(O_CLOEXEC)
|
||||
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
|
||||
#else
|
||||
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
|
||||
if (fd != -1) {
|
||||
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
if (fd == -1) {
|
||||
return false; /* Error. */
|
||||
}
|
||||
|
||||
ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
|
||||
syscall(SYS_close, fd);
|
||||
#else
|
||||
close(fd);
|
||||
#endif
|
||||
|
||||
if (nread < 1) {
|
||||
return false; /* Error. */
|
||||
}
|
||||
/*
|
||||
* /proc/sys/vm/overcommit_memory meanings:
|
||||
* 0: Heuristic overcommit.
|
||||
* 1: Always overcommit.
|
||||
* 2: Never overcommit.
|
||||
*/
|
||||
return (buf[0] == '0' || buf[0] == '1');
|
||||
}
|
||||
#endif
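
/*
 * Illustrative sketch, not part of the vendored source: the same policy check
 * done with plain open(2)/read(2)/close(2), for probing the setting outside
 * of bootstrap where syscall interposition is not a concern. The function
 * name is hypothetical.
 */
static bool
example_linux_overcommits(void) {
	char c;
	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
	if (fd == -1) {
		return false;	/* Treat errors as "no overcommit". */
	}
	ssize_t nread = read(fd, &c, 1);
	close(fd);
	/* Modes 0 (heuristic) and 1 (always) overcommit; mode 2 never does. */
	return nread == 1 && (c == '0' || c == '1');
}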
|
||||
|
||||
void
|
||||
pages_set_thp_state (void *ptr, size_t size) {
|
||||
if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
|
||||
return;
|
||||
}
|
||||
assert(opt_thp != thp_mode_not_supported &&
|
||||
init_system_thp_mode != thp_mode_not_supported);
|
||||
|
||||
if (opt_thp == thp_mode_always
|
||||
&& init_system_thp_mode != thp_mode_never) {
|
||||
assert(init_system_thp_mode == thp_mode_default);
|
||||
pages_huge_unaligned(ptr, size);
|
||||
} else if (opt_thp == thp_mode_never) {
|
||||
assert(init_system_thp_mode == thp_mode_default ||
|
||||
init_system_thp_mode == thp_mode_always);
|
||||
pages_nohuge_unaligned(ptr, size);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
init_thp_state(void) {
|
||||
if (!have_madvise_huge) {
|
||||
if (metadata_thp_enabled() && opt_abort) {
|
||||
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
|
||||
abort();
|
||||
}
|
||||
goto label_error;
|
||||
}
|
||||
|
||||
static const char sys_state_madvise[] = "always [madvise] never\n";
|
||||
static const char sys_state_always[] = "[always] madvise never\n";
|
||||
static const char sys_state_never[] = "always madvise [never]\n";
|
||||
char buf[sizeof(sys_state_madvise)];
|
||||
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
|
||||
int fd = (int)syscall(SYS_open,
|
||||
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
|
||||
#else
|
||||
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
|
||||
#endif
|
||||
if (fd == -1) {
|
||||
goto label_error;
|
||||
}
|
||||
|
||||
ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
|
||||
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
|
||||
syscall(SYS_close, fd);
|
||||
#else
|
||||
close(fd);
|
||||
#endif
|
||||
|
||||
if (nread < 0) {
|
||||
goto label_error;
|
||||
}
|
||||
|
||||
if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
|
||||
init_system_thp_mode = thp_mode_default;
|
||||
} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
|
||||
init_system_thp_mode = thp_mode_always;
|
||||
} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
|
||||
init_system_thp_mode = thp_mode_never;
|
||||
} else {
|
||||
goto label_error;
|
||||
}
|
||||
return;
|
||||
label_error:
|
||||
opt_thp = init_system_thp_mode = thp_mode_not_supported;
|
||||
}
|
||||
|
||||
bool
|
||||
pages_boot(void) {
|
||||
os_page = os_page_detect();
|
||||
if (os_page > PAGE) {
|
||||
malloc_write("<jemalloc>: Unsupported system page size\n");
|
||||
if (opt_abort) {
|
||||
abort();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
mmap_flags = MAP_PRIVATE | MAP_ANON;
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
|
||||
os_overcommits = os_overcommits_sysctl();
|
||||
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
|
||||
os_overcommits = os_overcommits_proc();
|
||||
# ifdef MAP_NORESERVE
|
||||
if (os_overcommits) {
|
||||
mmap_flags |= MAP_NORESERVE;
|
||||
}
|
||||
# endif
|
||||
#else
|
||||
os_overcommits = false;
|
||||
#endif
|
||||
|
||||
init_thp_state();
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
/*
|
||||
* FreeBSD doesn't need the check; madvise(2) is known to work.
|
||||
*/
|
||||
#else
|
||||
/* Detect lazy purge runtime support. */
|
||||
if (pages_can_purge_lazy) {
|
||||
bool committed = false;
|
||||
void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
|
||||
if (madv_free_page == NULL) {
|
||||
return true;
|
||||
}
|
||||
assert(pages_can_purge_lazy_runtime);
|
||||
if (pages_purge_lazy(madv_free_page, PAGE)) {
|
||||
pages_can_purge_lazy_runtime = false;
|
||||
}
|
||||
os_pages_unmap(madv_free_page, PAGE);
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
3
deps/jemalloc/src/prng.c
vendored
Normal file
3
deps/jemalloc/src/prng.c
vendored
Normal file
@@ -0,0 +1,3 @@
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
3160
deps/jemalloc/src/prof.c
vendored
Normal file
3160
deps/jemalloc/src/prof.c
vendored
Normal file
File diff suppressed because it is too large
Load Diff
199
deps/jemalloc/src/quarantine.c
vendored
Normal file
199
deps/jemalloc/src/quarantine.c
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
#define JEMALLOC_QUARANTINE_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/*
|
||||
* quarantine pointers close to NULL are used to encode state information that
|
||||
* is used for cleaning up during thread shutdown.
|
||||
*/
|
||||
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
|
||||
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
|
||||
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
malloc_tsd_data(, quarantine, quarantine_t *, NULL)
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
|
||||
static void quarantine_drain_one(quarantine_t *quarantine);
|
||||
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
quarantine_t *
|
||||
quarantine_init(size_t lg_maxobjs)
|
||||
{
|
||||
quarantine_t *quarantine;
|
||||
|
||||
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
|
||||
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
|
||||
if (quarantine == NULL)
|
||||
return (NULL);
|
||||
quarantine->curbytes = 0;
|
||||
quarantine->curobjs = 0;
|
||||
quarantine->first = 0;
|
||||
quarantine->lg_maxobjs = lg_maxobjs;
|
||||
|
||||
quarantine_tsd_set(&quarantine);
|
||||
|
||||
return (quarantine);
|
||||
}
|
||||
|
||||
static quarantine_t *
|
||||
quarantine_grow(quarantine_t *quarantine)
|
||||
{
|
||||
quarantine_t *ret;
|
||||
|
||||
ret = quarantine_init(quarantine->lg_maxobjs + 1);
|
||||
if (ret == NULL) {
|
||||
quarantine_drain_one(quarantine);
|
||||
return (quarantine);
|
||||
}
|
||||
|
||||
ret->curbytes = quarantine->curbytes;
|
||||
ret->curobjs = quarantine->curobjs;
|
||||
if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
|
||||
quarantine->lg_maxobjs)) {
|
||||
/* objs ring buffer data are contiguous. */
|
||||
memcpy(ret->objs, &quarantine->objs[quarantine->first],
|
||||
quarantine->curobjs * sizeof(quarantine_obj_t));
|
||||
} else {
|
||||
/* objs ring buffer data wrap around. */
|
||||
size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
|
||||
quarantine->first;
|
||||
size_t ncopy_b = quarantine->curobjs - ncopy_a;
|
||||
|
||||
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
|
||||
* sizeof(quarantine_obj_t));
|
||||
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
|
||||
sizeof(quarantine_obj_t));
|
||||
}
|
||||
idalloc(quarantine);
|
||||
|
||||
return (ret);
|
||||
}
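
/*
 * Illustrative sketch, not part of the vendored source: the contiguous-vs-
 * wrapped copy that quarantine_grow() performs above, as a standalone helper
 * for a power-of-two ring buffer. The function name is hypothetical.
 */
static void
example_ring_copy(quarantine_obj_t *dst, const quarantine_obj_t *src,
    size_t first, size_t count, size_t capacity) {
	if (first + count <= capacity) {
		/* Live elements are contiguous. */
		memcpy(dst, &src[first], count * sizeof(quarantine_obj_t));
	} else {
		/* Live elements wrap: copy the tail, then the head. */
		size_t ncopy_a = capacity - first;
		memcpy(dst, &src[first], ncopy_a * sizeof(quarantine_obj_t));
		memcpy(&dst[ncopy_a], src,
		    (count - ncopy_a) * sizeof(quarantine_obj_t));
	}
}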
|
||||
|
||||
static void
|
||||
quarantine_drain_one(quarantine_t *quarantine)
|
||||
{
|
||||
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
|
||||
assert(obj->usize == isalloc(obj->ptr, config_prof));
|
||||
idalloc(obj->ptr);
|
||||
quarantine->curbytes -= obj->usize;
|
||||
quarantine->curobjs--;
|
||||
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
|
||||
quarantine->lg_maxobjs) - 1);
|
||||
}
|
||||
|
||||
static void
|
||||
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
|
||||
{
|
||||
|
||||
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
|
||||
quarantine_drain_one(quarantine);
|
||||
}
|
||||
|
||||
void
|
||||
quarantine(void *ptr)
|
||||
{
|
||||
quarantine_t *quarantine;
|
||||
size_t usize = isalloc(ptr, config_prof);
|
||||
|
||||
cassert(config_fill);
|
||||
assert(opt_quarantine);
|
||||
|
||||
quarantine = *quarantine_tsd_get();
|
||||
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
|
||||
if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* Make a note that quarantine() was called after
|
||||
* quarantine_cleanup() was called.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_REINCARNATED;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
}
|
||||
idalloc(ptr);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Drain one or more objects if the quarantine size limit would be
|
||||
* exceeded by appending ptr.
|
||||
*/
|
||||
if (quarantine->curbytes + usize > opt_quarantine) {
|
||||
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
|
||||
- usize : 0;
|
||||
quarantine_drain(quarantine, upper_bound);
|
||||
}
|
||||
/* Grow the quarantine ring buffer if it's full. */
|
||||
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
|
||||
quarantine = quarantine_grow(quarantine);
|
||||
/* quarantine_grow() must free a slot if it fails to grow. */
|
||||
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
|
||||
/* Append ptr if its size doesn't exceed the quarantine size. */
|
||||
if (quarantine->curbytes + usize <= opt_quarantine) {
|
||||
size_t offset = (quarantine->first + quarantine->curobjs) &
|
||||
((ZU(1) << quarantine->lg_maxobjs) - 1);
|
||||
quarantine_obj_t *obj = &quarantine->objs[offset];
|
||||
obj->ptr = ptr;
|
||||
obj->usize = usize;
|
||||
quarantine->curbytes += usize;
|
||||
quarantine->curobjs++;
|
||||
if (config_fill && opt_junk) {
|
||||
/*
|
||||
* Only do redzone validation if Valgrind isn't in
|
||||
* operation.
|
||||
*/
|
||||
if ((config_valgrind == false || opt_valgrind == false)
|
||||
&& usize <= SMALL_MAXCLASS)
|
||||
arena_quarantine_junk_small(ptr, usize);
|
||||
else
|
||||
memset(ptr, 0x5a, usize);
|
||||
}
|
||||
} else {
|
||||
assert(quarantine->curbytes == 0);
|
||||
idalloc(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
quarantine_cleanup(void *arg)
|
||||
{
|
||||
quarantine_t *quarantine = *(quarantine_t **)arg;
|
||||
|
||||
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
|
||||
/*
|
||||
* Another destructor deallocated memory after this destructor
|
||||
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
|
||||
* in order to receive another callback.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_PURGATORY;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* The previous time this destructor was called, we set the key
|
||||
* to QUARANTINE_STATE_PURGATORY so that other destructors
|
||||
* wouldn't cause re-creation of the quarantine. This time, do
|
||||
* nothing, so that the destructor will not be called again.
|
||||
*/
|
||||
} else if (quarantine != NULL) {
|
||||
quarantine_drain(quarantine, 0);
|
||||
idalloc(quarantine);
|
||||
quarantine = QUARANTINE_STATE_PURGATORY;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
quarantine_boot(void)
|
||||
{
|
||||
|
||||
cassert(config_fill);
|
||||
|
||||
if (quarantine_tsd_boot())
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
320
deps/jemalloc/src/rtree.c
vendored
Normal file
320
deps/jemalloc/src/rtree.c
vendored
Normal file
@@ -0,0 +1,320 @@
|
||||
#define JEMALLOC_RTREE_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
/*
|
||||
* Only the most significant bits of keys passed to rtree_{read,write}() are
|
||||
* used.
|
||||
*/
|
||||
bool
|
||||
rtree_new(rtree_t *rtree, bool zeroed) {
|
||||
#ifdef JEMALLOC_JET
|
||||
if (!zeroed) {
|
||||
memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
|
||||
}
|
||||
#else
|
||||
assert(zeroed);
|
||||
#endif
|
||||
|
||||
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
|
||||
return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
|
||||
sizeof(rtree_node_elm_t), CACHELINE);
|
||||
}
|
||||
rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
|
||||
|
||||
static void
|
||||
rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
|
||||
/* Nodes are never deleted during normal operation. */
|
||||
not_reached();
|
||||
}
|
||||
rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
|
||||
rtree_node_dalloc_impl;
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
|
||||
return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
|
||||
sizeof(rtree_leaf_elm_t), CACHELINE);
|
||||
}
|
||||
rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
|
||||
|
||||
static void
|
||||
rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
|
||||
/* Leaves are never deleted during normal operation. */
|
||||
not_reached();
|
||||
}
|
||||
rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
|
||||
rtree_leaf_dalloc_impl;
|
||||
|
||||
#ifdef JEMALLOC_JET
|
||||
# if RTREE_HEIGHT > 1
|
||||
static void
|
||||
rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
|
||||
unsigned level) {
|
||||
size_t nchildren = ZU(1) << rtree_levels[level].bits;
|
||||
if (level + 2 < RTREE_HEIGHT) {
|
||||
for (size_t i = 0; i < nchildren; i++) {
|
||||
rtree_node_elm_t *node =
|
||||
(rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
|
||||
ATOMIC_RELAXED);
|
||||
if (node != NULL) {
|
||||
rtree_delete_subtree(tsdn, rtree, node, level +
|
||||
1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < nchildren; i++) {
|
||||
rtree_leaf_elm_t *leaf =
|
||||
(rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
|
||||
ATOMIC_RELAXED);
|
||||
if (leaf != NULL) {
|
||||
rtree_leaf_dalloc(tsdn, rtree, leaf);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (subtree != rtree->root) {
|
||||
rtree_node_dalloc(tsdn, rtree, subtree);
|
||||
}
|
||||
}
|
||||
# endif
|
||||
|
||||
void
|
||||
rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
|
||||
# if RTREE_HEIGHT > 1
|
||||
rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
|
||||
# endif
|
||||
}
|
||||
#endif
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
|
||||
atomic_p_t *elmp) {
|
||||
malloc_mutex_lock(tsdn, &rtree->init_lock);
|
||||
/*
|
||||
* If *elmp is non-null, then it was initialized with the init lock
|
||||
* held, so we can get by with 'relaxed' here.
|
||||
*/
|
||||
rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
|
||||
if (node == NULL) {
|
||||
node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
|
||||
rtree_levels[level].bits);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Even though we hold the lock, a later reader might not; we
|
||||
* need release semantics.
|
||||
*/
|
||||
atomic_store_p(elmp, node, ATOMIC_RELEASE);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
|
||||
malloc_mutex_lock(tsdn, &rtree->init_lock);
|
||||
/*
|
||||
* If *elmp is non-null, then it was initialized with the init lock
|
||||
* held, so we can get by with 'relaxed' here.
|
||||
*/
|
||||
rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
|
||||
if (leaf == NULL) {
|
||||
leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
|
||||
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||
if (leaf == NULL) {
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Even though we hold the lock, a later reader might not; we
|
||||
* need release semantics.
|
||||
*/
|
||||
atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &rtree->init_lock);
|
||||
|
||||
return leaf;
|
||||
}
|
||||
|
||||
static bool
|
||||
rtree_node_valid(rtree_node_elm_t *node) {
|
||||
return ((uintptr_t)node != (uintptr_t)0);
|
||||
}
|
||||
|
||||
static bool
|
||||
rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
|
||||
return ((uintptr_t)leaf != (uintptr_t)0);
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
|
||||
rtree_node_elm_t *node;
|
||||
|
||||
if (dependent) {
|
||||
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_RELAXED);
|
||||
} else {
|
||||
node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
assert(!dependent || node != NULL);
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
|
||||
unsigned level, bool dependent) {
|
||||
rtree_node_elm_t *node;
|
||||
|
||||
node = rtree_child_node_tryread(elm, dependent);
|
||||
if (!dependent && unlikely(!rtree_node_valid(node))) {
|
||||
node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
|
||||
}
|
||||
assert(!dependent || node != NULL);
|
||||
return node;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
|
||||
rtree_leaf_elm_t *leaf;
|
||||
|
||||
if (dependent) {
|
||||
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_RELAXED);
|
||||
} else {
|
||||
leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
|
||||
ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
assert(!dependent || leaf != NULL);
|
||||
return leaf;
|
||||
}
|
||||
|
||||
static rtree_leaf_elm_t *
|
||||
rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
|
||||
unsigned level, bool dependent) {
|
||||
rtree_leaf_elm_t *leaf;
|
||||
|
||||
leaf = rtree_child_leaf_tryread(elm, dependent);
|
||||
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
|
||||
leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
|
||||
}
|
||||
assert(!dependent || leaf != NULL);
|
||||
return leaf;
|
||||
}
|
||||
|
||||
rtree_leaf_elm_t *
|
||||
rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||
uintptr_t key, bool dependent, bool init_missing) {
|
||||
rtree_node_elm_t *node;
|
||||
rtree_leaf_elm_t *leaf;
|
||||
#if RTREE_HEIGHT > 1
|
||||
node = rtree->root;
|
||||
#else
|
||||
leaf = rtree->root;
|
||||
#endif
|
||||
|
||||
if (config_debug) {
|
||||
uintptr_t leafkey = rtree_leafkey(key);
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
|
||||
assert(rtree_ctx->cache[i].leafkey != leafkey);
|
||||
}
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
|
||||
assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
|
||||
}
|
||||
}
|
||||
|
||||
#define RTREE_GET_CHILD(level) { \
|
||||
assert(level < RTREE_HEIGHT-1); \
|
||||
if (level != 0 && !dependent && \
|
||||
unlikely(!rtree_node_valid(node))) { \
|
||||
return NULL; \
|
||||
} \
|
||||
uintptr_t subkey = rtree_subkey(key, level); \
|
||||
if (level + 2 < RTREE_HEIGHT) { \
|
||||
node = init_missing ? \
|
||||
rtree_child_node_read(tsdn, rtree, \
|
||||
&node[subkey], level, dependent) : \
|
||||
rtree_child_node_tryread(&node[subkey], \
|
||||
dependent); \
|
||||
} else { \
|
||||
leaf = init_missing ? \
|
||||
rtree_child_leaf_read(tsdn, rtree, \
|
||||
&node[subkey], level, dependent) : \
|
||||
rtree_child_leaf_tryread(&node[subkey], \
|
||||
dependent); \
|
||||
} \
|
||||
}
|
||||
/*
|
||||
* Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
|
||||
* (1) evict last entry in L2 cache; (2) move the collision slot from L1
|
||||
* cache down to L2; and 3) fill L1.
|
||||
*/
|
||||
#define RTREE_GET_LEAF(level) { \
|
||||
assert(level == RTREE_HEIGHT-1); \
|
||||
if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
|
||||
return NULL; \
|
||||
} \
|
||||
if (RTREE_CTX_NCACHE_L2 > 1) { \
|
||||
memmove(&rtree_ctx->l2_cache[1], \
|
||||
&rtree_ctx->l2_cache[0], \
|
||||
sizeof(rtree_ctx_cache_elm_t) * \
|
||||
(RTREE_CTX_NCACHE_L2 - 1)); \
|
||||
} \
|
||||
size_t slot = rtree_cache_direct_map(key); \
|
||||
rtree_ctx->l2_cache[0].leafkey = \
|
||||
rtree_ctx->cache[slot].leafkey; \
|
||||
rtree_ctx->l2_cache[0].leaf = \
|
||||
rtree_ctx->cache[slot].leaf; \
|
||||
uintptr_t leafkey = rtree_leafkey(key); \
|
||||
rtree_ctx->cache[slot].leafkey = leafkey; \
|
||||
rtree_ctx->cache[slot].leaf = leaf; \
|
||||
uintptr_t subkey = rtree_subkey(key, level); \
|
||||
return &leaf[subkey]; \
|
||||
}
|
||||
if (RTREE_HEIGHT > 1) {
|
||||
RTREE_GET_CHILD(0)
|
||||
}
|
||||
if (RTREE_HEIGHT > 2) {
|
||||
RTREE_GET_CHILD(1)
|
||||
}
|
||||
if (RTREE_HEIGHT > 3) {
|
||||
for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
|
||||
RTREE_GET_CHILD(i)
|
||||
}
|
||||
}
|
||||
RTREE_GET_LEAF(RTREE_HEIGHT-1)
|
||||
#undef RTREE_GET_CHILD
|
||||
#undef RTREE_GET_LEAF
|
||||
not_reached();
|
||||
}
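
/*
 * Illustrative sketch, not part of the vendored source: the three cache
 * replacement steps described in the comment above (evict the oldest L2
 * entry, demote the colliding L1 slot, fill L1), written as a plain function
 * over the same cache element type. The function name is hypothetical and the
 * slot is assumed to come from rtree_cache_direct_map().
 */
static void
example_rtree_cache_fill(rtree_ctx_t *ctx, size_t slot, uintptr_t leafkey,
    rtree_leaf_elm_t *leaf) {
	/* (1) Evict the last L2 entry by shifting the rest down. */
	if (RTREE_CTX_NCACHE_L2 > 1) {
		memmove(&ctx->l2_cache[1], &ctx->l2_cache[0],
		    sizeof(rtree_ctx_cache_elm_t) * (RTREE_CTX_NCACHE_L2 - 1));
	}
	/* (2) Demote the colliding L1 slot into the front of L2. */
	ctx->l2_cache[0] = ctx->cache[slot];
	/* (3) Fill the L1 slot with the new leaf. */
	ctx->cache[slot].leafkey = leafkey;
	ctx->cache[slot].leaf = leaf;
}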
|
||||
|
||||
void
|
||||
rtree_ctx_data_init(rtree_ctx_t *ctx) {
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
|
||||
rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
|
||||
cache->leafkey = RTREE_LEAFKEY_INVALID;
|
||||
cache->leaf = NULL;
|
||||
}
|
||||
for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
|
||||
rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
|
||||
cache->leafkey = RTREE_LEAFKEY_INVALID;
|
||||
cache->leaf = NULL;
|
||||
}
|
||||
}
|
||||
24
deps/jemalloc/src/safety_check.c
vendored
Normal file
24
deps/jemalloc/src/safety_check.c
vendored
Normal file
@@ -0,0 +1,24 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

static void (*safety_check_abort)(const char *message);

void safety_check_set_abort(void (*abort_fn)(const char *)) {
safety_check_abort = abort_fn;
}

void safety_check_fail(const char *format, ...) {
char buf[MALLOC_PRINTF_BUFSIZE];

va_list ap;
va_start(ap, format);
malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
va_end(ap);

if (safety_check_abort == NULL) {
malloc_write(buf);
abort();
} else {
safety_check_abort(buf);
}
}
313
deps/jemalloc/src/sc.c
vendored
Normal file
313
deps/jemalloc/src/sc.c
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/bit_util.h"
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/pages.h"
|
||||
#include "jemalloc/internal/sc.h"
|
||||
|
||||
/*
|
||||
* This module computes the size classes used to satisfy allocations. The logic
|
||||
* here was ported more or less line-by-line from a shell script, and because of
|
||||
* that is not the most idiomatic C. Eventually we should fix this, but for now
|
||||
* at least the damage is compartmentalized to this file.
|
||||
*/
|
||||
|
||||
sc_data_t sc_data_global;
|
||||
|
||||
static size_t
|
||||
reg_size_compute(int lg_base, int lg_delta, int ndelta) {
|
||||
return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
|
||||
}
|
||||
|
||||
/* Returns the number of pages in the slab. */
|
||||
static int
|
||||
slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
|
||||
size_t page = (ZU(1) << lg_page);
|
||||
size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
|
||||
|
||||
size_t try_slab_size = page;
|
||||
size_t try_nregs = try_slab_size / reg_size;
|
||||
size_t perfect_slab_size = 0;
|
||||
bool perfect = false;
|
||||
/*
|
||||
* This loop continues until we find the least common multiple of the
|
||||
* page size and size class size. Size classes are all of the form
|
||||
* base + ndelta * delta == (ndelta + base/ndelta) * delta, which is
|
||||
* (ndelta + ngroup) * delta. The way we choose slabbing strategies
|
||||
* means that delta is at most the page size and ndelta < ngroup. So
|
||||
* the loop executes for at most 2 * ngroup - 1 iterations, which is
|
||||
* also the bound on the number of pages in a slab chosen by default.
|
||||
* With the current default settings, this is at most 7.
|
||||
*/
|
||||
while (!perfect) {
|
||||
perfect_slab_size = try_slab_size;
|
||||
size_t perfect_nregs = try_nregs;
|
||||
try_slab_size += page;
|
||||
try_nregs = try_slab_size / reg_size;
|
||||
if (perfect_slab_size == perfect_nregs * reg_size) {
|
||||
perfect = true;
|
||||
}
|
||||
}
|
||||
return (int)(perfect_slab_size / page);
|
||||
}
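
/*
 * Illustrative sketch, not part of the vendored source: the same
 * least-common-multiple search as slab_size() above, in standalone form.
 * For an 80-byte class on 4 KiB pages it settles on a 5-page slab, since
 * 5 * 4096 == 256 * 80 is the first exact fit. The function name is
 * hypothetical.
 */
static size_t
example_slab_pages(size_t page, size_t reg_size) {
	size_t slab = page;
	/* Grow one page at a time until regions tile the slab exactly. */
	while (slab % reg_size != 0) {
		slab += page;
	}
	return slab / page;
}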
|
static void
size_class(
    /* Output. */
    sc_t *sc,
    /* Configuration decisions. */
    int lg_max_lookup, int lg_page, int lg_ngroup,
    /* Inputs specific to the size class. */
    int index, int lg_base, int lg_delta, int ndelta) {
    sc->index = index;
    sc->lg_base = lg_base;
    sc->lg_delta = lg_delta;
    sc->ndelta = ndelta;
    sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta)
        % (ZU(1) << lg_page) == 0);
    size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
    if (index == 0) {
        assert(!sc->psz);
    }
    if (size < (ZU(1) << (lg_page + lg_ngroup))) {
        sc->bin = true;
        sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
    } else {
        sc->bin = false;
        sc->pgs = 0;
    }
    if (size <= (ZU(1) << lg_max_lookup)) {
        sc->lg_delta_lookup = lg_delta;
    } else {
        sc->lg_delta_lookup = 0;
    }
}

static void
size_classes(
    /* Output. */
    sc_data_t *sc_data,
    /* Determined by the system. */
    size_t lg_ptr_size, int lg_quantum,
    /* Configuration decisions. */
    int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) {
    int ptr_bits = (1 << lg_ptr_size) * 8;
    int ngroup = (1 << lg_ngroup);
    int ntiny = 0;
    int nlbins = 0;
    int lg_tiny_maxclass = (unsigned)-1;
    int nbins = 0;
    int npsizes = 0;

    int index = 0;

    int ndelta = 0;
    int lg_base = lg_tiny_min;
    int lg_delta = lg_base;

    /* Outputs that we update as we go. */
    size_t lookup_maxclass = 0;
    size_t small_maxclass = 0;
    int lg_large_minclass = 0;
    size_t large_maxclass = 0;

    /* Tiny size classes. */
    while (lg_base < lg_quantum) {
        sc_t *sc = &sc_data->sc[index];
        size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
            lg_base, lg_delta, ndelta);
        if (sc->lg_delta_lookup != 0) {
            nlbins = index + 1;
        }
        if (sc->psz) {
            npsizes++;
        }
        if (sc->bin) {
            nbins++;
        }
        ntiny++;
        /* Final written value is correct. */
        lg_tiny_maxclass = lg_base;
        index++;
        lg_delta = lg_base;
        lg_base++;
    }

    /* First non-tiny (pseudo) group. */
    if (ntiny != 0) {
        sc_t *sc = &sc_data->sc[index];
        /*
         * See the note in sc.h; the first non-tiny size class has an
         * unusual encoding.
         */
        lg_base--;
        ndelta = 1;
        size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
            lg_base, lg_delta, ndelta);
        index++;
        lg_base++;
        lg_delta++;
        if (sc->psz) {
            npsizes++;
        }
        if (sc->bin) {
            nbins++;
        }
    }
    while (ndelta < ngroup) {
        sc_t *sc = &sc_data->sc[index];
        size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
            lg_base, lg_delta, ndelta);
        index++;
        ndelta++;
        if (sc->psz) {
            npsizes++;
        }
        if (sc->bin) {
            nbins++;
        }
    }

    /* All remaining groups. */
    lg_base = lg_base + lg_ngroup;
    while (lg_base < ptr_bits - 1) {
        ndelta = 1;
        int ndelta_limit;
        if (lg_base == ptr_bits - 2) {
            ndelta_limit = ngroup - 1;
        } else {
            ndelta_limit = ngroup;
        }
        while (ndelta <= ndelta_limit) {
            sc_t *sc = &sc_data->sc[index];
            size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
                lg_base, lg_delta, ndelta);
            if (sc->lg_delta_lookup != 0) {
                nlbins = index + 1;
                /* Final written value is correct. */
                lookup_maxclass = (ZU(1) << lg_base)
                    + (ZU(ndelta) << lg_delta);
            }
            if (sc->psz) {
                npsizes++;
            }
            if (sc->bin) {
                nbins++;
                /* Final written value is correct. */
                small_maxclass = (ZU(1) << lg_base)
                    + (ZU(ndelta) << lg_delta);
                if (lg_ngroup > 0) {
                    lg_large_minclass = lg_base + 1;
                } else {
                    lg_large_minclass = lg_base + 2;
                }
            }
            large_maxclass = (ZU(1) << lg_base)
                + (ZU(ndelta) << lg_delta);
            index++;
            ndelta++;
        }
        lg_base++;
        lg_delta++;
    }
    /* Additional outputs. */
    int nsizes = index;
    unsigned lg_ceil_nsizes = lg_ceil(nsizes);

    /* Fill in the output data. */
    sc_data->ntiny = ntiny;
    sc_data->nlbins = nlbins;
    sc_data->nbins = nbins;
    sc_data->nsizes = nsizes;
    sc_data->lg_ceil_nsizes = lg_ceil_nsizes;
    sc_data->npsizes = npsizes;
    sc_data->lg_tiny_maxclass = lg_tiny_maxclass;
    sc_data->lookup_maxclass = lookup_maxclass;
    sc_data->small_maxclass = small_maxclass;
    sc_data->lg_large_minclass = lg_large_minclass;
    sc_data->large_minclass = (ZU(1) << lg_large_minclass);
    sc_data->large_maxclass = large_maxclass;

    /*
     * We compute these values in two ways:
     * - Incrementally, as above.
     * - In macros, in sc.h.
     * The computation is easier when done incrementally, but putting it in
     * a constant makes it available to the fast paths without having to
     * touch the extra global cacheline.  We assert, however, that the two
     * computations are equivalent.
     */
    assert(sc_data->npsizes == SC_NPSIZES);
    assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
    assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
    assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
    assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
    assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);

    /*
     * In the allocation fastpath, we want to assume that we can
     * unconditionally subtract the requested allocation size from
     * a ssize_t, and detect passing through 0 correctly.  This
     * results in optimal generated code.  For this to work, the
     * maximum allocation size must be less than SSIZE_MAX.
     */
    assert(SC_LARGE_MAXCLASS < SSIZE_MAX);
}

void
sc_data_init(sc_data_t *sc_data) {
    assert(!sc_data->initialized);

    int lg_max_lookup = 12;

    size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
        lg_max_lookup, LG_PAGE, 2);

    sc_data->initialized = true;
}

static void
sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) {
    size_t min_pgs = reg_size / PAGE;
    if (reg_size % PAGE != 0) {
        min_pgs++;
    }
    /*
     * BITMAP_MAXBITS is actually determined by putting the smallest
     * possible size-class on one page, so this can never be 0.
     */
    size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE;

    assert(min_pgs <= max_pgs);
    assert(min_pgs > 0);
    assert(max_pgs >= 1);
    if (pgs_guess < min_pgs) {
        sc->pgs = (int)min_pgs;
    } else if (pgs_guess > max_pgs) {
        sc->pgs = (int)max_pgs;
    } else {
        sc->pgs = (int)pgs_guess;
    }
}

void
sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
    assert(data->initialized);
    for (int i = 0; i < data->nsizes; i++) {
        sc_t *sc = &data->sc[i];
        if (!sc->bin) {
            break;
        }
        size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
            sc->ndelta);
        if (begin <= reg_size && reg_size <= end) {
            sc_data_update_sc_slab_size(sc, reg_size, pgs);
        }
    }
}

void
sc_boot(sc_data_t *data) {
    sc_data_init(data);
}
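/*
 * Worked example (illustrative, values assumed): each class built by
 * size_class() above represents reg_size = (1 << lg_base) + (ndelta << lg_delta).
 * With lg_page == 12 and lg_ngroup == 2, a class with lg_base == 13,
 * lg_delta == 11 and ndelta == 3 covers allocations up to
 * 8192 + 3 * 2048 = 14336 bytes; since 14336 % 4096 != 0 it is not page-aligned,
 * so sc->psz would be false for that class.
 */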
4
deps/jemalloc/src/spin.c
vendored
Normal file
4
deps/jemalloc/src/spin.c
vendored
Normal file
@@ -0,0 +1,4 @@
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/spin.h"
1457
deps/jemalloc/src/stats.c
vendored
Normal file
1457
deps/jemalloc/src/stats.c
vendored
Normal file
File diff suppressed because it is too large
Load Diff
64
deps/jemalloc/src/sz.c
vendored
Normal file
64
deps/jemalloc/src/sz.c
vendored
Normal file
@@ -0,0 +1,64 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];

static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
    int pind = 0;
    for (unsigned i = 0; i < SC_NSIZES; i++) {
        const sc_t *sc = &sc_data->sc[i];
        if (sc->psz) {
            sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
                + (ZU(sc->ndelta) << sc->lg_delta);
            pind++;
        }
    }
    for (int i = pind; i <= (int)SC_NPSIZES; i++) {
        sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
    }
}

JEMALLOC_ALIGNED(CACHELINE)
size_t sz_index2size_tab[SC_NSIZES];

static void
sz_boot_index2size_tab(const sc_data_t *sc_data) {
    for (unsigned i = 0; i < SC_NSIZES; i++) {
        const sc_t *sc = &sc_data->sc[i];
        sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
            + (ZU(sc->ndelta) << (sc->lg_delta));
    }
}

/*
 * To keep this table small, we divide sizes by the tiny min size, which gives
 * the smallest interval for which the result can change.
 */
JEMALLOC_ALIGNED(CACHELINE)
uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];

static void
sz_boot_size2index_tab(const sc_data_t *sc_data) {
    size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
    size_t dst_ind = 0;
    for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
        sc_ind++) {
        const sc_t *sc = &sc_data->sc[sc_ind];
        size_t sz = (ZU(1) << sc->lg_base)
            + (ZU(sc->ndelta) << sc->lg_delta);
        size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
            >> SC_LG_TINY_MIN);
        for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
            sz_size2index_tab[dst_ind] = sc_ind;
        }
    }
}

void
sz_boot(const sc_data_t *sc_data) {
    sz_boot_pind2sz_tab(sc_data);
    sz_boot_index2size_tab(sc_data);
    sz_boot_size2index_tab(sc_data);
}
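/*
 * Lookup sketch (illustrative): the filling order in sz_boot_size2index_tab()
 * guarantees that slot ceil(size / (1 << SC_LG_TINY_MIN)) of sz_size2index_tab
 * holds the index of the smallest class that can hold `size`, for any size up
 * to SC_LOOKUP_MAXCLASS.  E.g. with a tiny min of 8 bytes, a 9-byte request
 * maps to slot (9 + 7) >> 3 == 2, which was filled with the index of the
 * 16-byte class; the reverse mapping is a plain read of sz_index2size_tab.
 */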
798
deps/jemalloc/src/tcache.c
vendored
Normal file
798
deps/jemalloc/src/tcache.c
vendored
Normal file
@@ -0,0 +1,798 @@
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"

/******************************************************************************/
/* Data. */

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

cache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

unsigned nhbins;
size_t tcache_maxclass;

tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t tcaches_mtx;

/******************************************************************************/
|
||||
|
||||
size_t
|
||||
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
|
||||
return arena_salloc(tsdn, ptr);
|
||||
}
|
||||
|
||||
void
|
||||
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
|
||||
szind_t binind = tcache->next_gc_bin;
|
||||
|
||||
cache_bin_t *tbin;
|
||||
if (binind < SC_NBINS) {
|
||||
tbin = tcache_small_bin_get(tcache, binind);
|
||||
} else {
|
||||
tbin = tcache_large_bin_get(tcache, binind);
|
||||
}
|
||||
if (tbin->low_water > 0) {
|
||||
/*
|
||||
* Flush (ceiling) 3/4 of the objects below the low water mark.
|
||||
*/
|
||||
if (binind < SC_NBINS) {
|
||||
tcache_bin_flush_small(tsd, tcache, tbin, binind,
|
||||
tbin->ncached - tbin->low_water + (tbin->low_water
|
||||
>> 2));
|
||||
/*
|
||||
* Reduce fill count by 2X. Limit lg_fill_div such that
|
||||
* the fill count is always at least 1.
|
||||
*/
|
||||
cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
|
||||
if ((tbin_info->ncached_max >>
|
||||
(tcache->lg_fill_div[binind] + 1)) >= 1) {
|
||||
tcache->lg_fill_div[binind]++;
|
||||
}
|
||||
} else {
|
||||
tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
|
||||
- tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
}
|
||||
} else if (tbin->low_water < 0) {
|
||||
/*
|
||||
* Increase fill count by 2X for small bins. Make sure
|
||||
* lg_fill_div stays greater than 0.
|
||||
*/
|
||||
if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
|
||||
tcache->lg_fill_div[binind]--;
|
||||
}
|
||||
}
|
||||
tbin->low_water = tbin->ncached;
|
||||
|
||||
tcache->next_gc_bin++;
|
||||
if (tcache->next_gc_bin == nhbins) {
|
||||
tcache->next_gc_bin = 0;
|
||||
}
|
||||
}
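/*
 * Worked example (illustrative): if a bin's low_water over the last GC
 * interval was 8 while ncached is 20, the small-bin branch above keeps
 * rem = 20 - 8 + (8 >> 2) = 14 objects, i.e. it flushes 6 of them -- the
 * ceiling of 3/4 of the 8 objects that sat unused below the low water mark.
 */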
|
||||
|
||||
void *
|
||||
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
||||
cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
|
||||
void *ret;
|
||||
|
||||
assert(tcache->arena != NULL);
|
||||
arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
|
||||
config_prof ? tcache->prof_accumbytes : 0);
|
||||
if (config_prof) {
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
ret = cache_bin_alloc_easy(tbin, tcache_success);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Enabled with --enable-extra-size-check. */
|
||||
static void
|
||||
tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
|
||||
size_t nflush, extent_t **extents){
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
/*
|
||||
* Verify that the items in the tcache all have the correct size; this
|
||||
* is useful for catching sized deallocation bugs, also to fail early
|
||||
* instead of corrupting metadata. Since this can be turned on for opt
|
||||
* builds, avoid the branch in the loop.
|
||||
*/
|
||||
szind_t szind;
|
||||
size_t sz_sum = binind * nflush;
|
||||
for (unsigned i = 0 ; i < nflush; i++) {
|
||||
rtree_extent_szind_read(tsdn, &extents_rtree,
|
||||
rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
|
||||
&extents[i], &szind);
|
||||
sz_sum -= szind;
|
||||
}
|
||||
if (sz_sum != 0) {
|
||||
safety_check_fail("<jemalloc>: size mismatch in thread cache "
|
||||
"detected, likely caused by sized deallocation bugs by "
|
||||
"application. Abort.\n");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
|
||||
szind_t binind, unsigned rem) {
|
||||
bool merged_stats = false;
|
||||
|
||||
assert(binind < SC_NBINS);
|
||||
assert((cache_bin_sz_t)rem <= tbin->ncached);
|
||||
|
||||
arena_t *arena = tcache->arena;
|
||||
assert(arena != NULL);
|
||||
unsigned nflush = tbin->ncached - rem;
|
||||
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
|
||||
|
||||
/* Look up extent once per item. */
|
||||
if (config_opt_safety_checks) {
|
||||
tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
|
||||
nflush, item_extent);
|
||||
} else {
|
||||
for (unsigned i = 0 ; i < nflush; i++) {
|
||||
item_extent[i] = iealloc(tsd_tsdn(tsd),
|
||||
*(tbin->avail - 1 - i));
|
||||
}
|
||||
}
|
||||
while (nflush > 0) {
|
||||
/* Lock the arena bin associated with the first object. */
|
||||
extent_t *extent = item_extent[0];
|
||||
unsigned bin_arena_ind = extent_arena_ind_get(extent);
|
||||
arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
|
||||
false);
|
||||
unsigned binshard = extent_binshard_get(extent);
|
||||
assert(binshard < bin_infos[binind].n_shards);
|
||||
bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
|
||||
|
||||
if (config_prof && bin_arena == arena) {
|
||||
if (arena_prof_accum(tsd_tsdn(tsd), arena,
|
||||
tcache->prof_accumbytes)) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
|
||||
if (config_stats && bin_arena == arena && !merged_stats) {
|
||||
merged_stats = true;
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
unsigned ndeferred = 0;
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
extent = item_extent[i];
|
||||
assert(ptr != NULL && extent != NULL);
|
||||
|
||||
if (extent_arena_ind_get(extent) == bin_arena_ind
|
||||
&& extent_binshard_get(extent) == binshard) {
|
||||
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
|
||||
bin_arena, bin, binind, extent, ptr);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
* arena bin than the one that is currently
|
||||
* locked. Stash the object, so that it can be
|
||||
* handled in a future pass.
|
||||
*/
|
||||
*(tbin->avail - 1 - ndeferred) = ptr;
|
||||
item_extent[ndeferred] = extent;
|
||||
ndeferred++;
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
|
||||
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
|
||||
nflush = ndeferred;
|
||||
}
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
unsigned binshard;
|
||||
bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
|
||||
&binshard);
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
tbin->tstats.nrequests = 0;
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
|
||||
}
|
||||
|
||||
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
|
||||
sizeof(void *));
|
||||
tbin->ncached = rem;
|
||||
if (tbin->ncached < tbin->low_water) {
|
||||
tbin->low_water = tbin->ncached;
|
||||
}
|
||||
}
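/*
 * Note on the loop structure above: a tcache may hold objects owned by several
 * arenas and bin shards.  Each pass of the outer while loop locks only the bin
 * that owns the first remaining object, frees every object sharing that
 * (arena, shard) pair, and stashes the rest at the front of the avail array;
 * the stashed objects become the input of the next pass.  The flush therefore
 * costs one lock acquisition per distinct bin touched rather than one per
 * object.
 */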
|
||||
|
||||
void
|
||||
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
|
||||
unsigned rem, tcache_t *tcache) {
|
||||
bool merged_stats = false;
|
||||
|
||||
assert(binind < nhbins);
|
||||
assert((cache_bin_sz_t)rem <= tbin->ncached);
|
||||
|
||||
arena_t *tcache_arena = tcache->arena;
|
||||
assert(tcache_arena != NULL);
|
||||
unsigned nflush = tbin->ncached - rem;
|
||||
VARIABLE_ARRAY(extent_t *, item_extent, nflush);
|
||||
|
||||
#ifndef JEMALLOC_EXTRA_SIZE_CHECK
|
||||
/* Look up extent once per item. */
|
||||
for (unsigned i = 0 ; i < nflush; i++) {
|
||||
item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
|
||||
}
|
||||
#else
|
||||
tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
|
||||
item_extent);
|
||||
#endif
|
||||
while (nflush > 0) {
|
||||
/* Lock the arena associated with the first object. */
|
||||
extent_t *extent = item_extent[0];
|
||||
unsigned locked_arena_ind = extent_arena_ind_get(extent);
|
||||
arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
|
||||
locked_arena_ind, false);
|
||||
bool idump;
|
||||
|
||||
if (config_prof) {
|
||||
idump = false;
|
||||
}
|
||||
|
||||
bool lock_large = !arena_is_auto(locked_arena);
|
||||
if (lock_large) {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
|
||||
}
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
assert(ptr != NULL);
|
||||
extent = item_extent[i];
|
||||
if (extent_arena_ind_get(extent) == locked_arena_ind) {
|
||||
large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
|
||||
extent);
|
||||
}
|
||||
}
|
||||
if ((config_prof || config_stats) &&
|
||||
(locked_arena == tcache_arena)) {
|
||||
if (config_prof) {
|
||||
idump = arena_prof_accum(tsd_tsdn(tsd),
|
||||
tcache_arena, tcache->prof_accumbytes);
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
if (config_stats) {
|
||||
merged_stats = true;
|
||||
arena_stats_large_flush_nrequests_add(
|
||||
tsd_tsdn(tsd), &tcache_arena->stats, binind,
|
||||
tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
}
|
||||
if (lock_large) {
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
|
||||
}
|
||||
|
||||
unsigned ndeferred = 0;
|
||||
for (unsigned i = 0; i < nflush; i++) {
|
||||
void *ptr = *(tbin->avail - 1 - i);
|
||||
extent = item_extent[i];
|
||||
assert(ptr != NULL && extent != NULL);
|
||||
|
||||
if (extent_arena_ind_get(extent) == locked_arena_ind) {
|
||||
large_dalloc_finish(tsd_tsdn(tsd), extent);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
* arena than the one that is currently locked.
|
||||
* Stash the object, so that it can be handled
|
||||
* in a future pass.
|
||||
*/
|
||||
*(tbin->avail - 1 - ndeferred) = ptr;
|
||||
item_extent[ndeferred] = extent;
|
||||
ndeferred++;
|
||||
}
|
||||
}
|
||||
if (config_prof && idump) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
|
||||
ndeferred);
|
||||
nflush = ndeferred;
|
||||
}
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
|
||||
&tcache_arena->stats, binind, tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
|
||||
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
|
||||
sizeof(void *));
|
||||
tbin->ncached = rem;
|
||||
if (tbin->ncached < tbin->low_water) {
|
||||
tbin->low_water = tbin->ncached;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
assert(tcache->arena == NULL);
|
||||
tcache->arena = arena;
|
||||
|
||||
if (config_stats) {
|
||||
/* Link into list of extant tcaches. */
|
||||
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
||||
|
||||
ql_elm_new(tcache, link);
|
||||
ql_tail_insert(&arena->tcache_ql, tcache, link);
|
||||
cache_bin_array_descriptor_init(
|
||||
&tcache->cache_bin_array_descriptor, tcache->bins_small,
|
||||
tcache->bins_large);
|
||||
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
|
||||
&tcache->cache_bin_array_descriptor, link);
|
||||
|
||||
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
|
||||
arena_t *arena = tcache->arena;
|
||||
assert(arena != NULL);
|
||||
if (config_stats) {
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
||||
if (config_debug) {
|
||||
bool in_ql = false;
|
||||
tcache_t *iter;
|
||||
ql_foreach(iter, &arena->tcache_ql, link) {
|
||||
if (iter == tcache) {
|
||||
in_ql = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(in_ql);
|
||||
}
|
||||
ql_remove(&arena->tcache_ql, tcache, link);
|
||||
ql_remove(&arena->cache_bin_array_descriptor_ql,
|
||||
&tcache->cache_bin_array_descriptor, link);
|
||||
tcache_stats_merge(tsdn, tcache, arena);
|
||||
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
|
||||
}
|
||||
tcache->arena = NULL;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
tcache_arena_dissociate(tsdn, tcache);
|
||||
tcache_arena_associate(tsdn, tcache, arena);
|
||||
}
|
||||
|
||||
bool
|
||||
tsd_tcache_enabled_data_init(tsd_t *tsd) {
|
||||
/* Called upon tsd initialization. */
|
||||
tsd_tcache_enabled_set(tsd, opt_tcache);
|
||||
tsd_slow_update(tsd);
|
||||
|
||||
if (opt_tcache) {
|
||||
/* Trigger tcache init. */
|
||||
tsd_tcache_data_init(tsd);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Initialize auto tcache (embedded in TSD). */
|
||||
static void
|
||||
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
|
||||
memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
|
||||
tcache->prof_accumbytes = 0;
|
||||
tcache->next_gc_bin = 0;
|
||||
tcache->arena = NULL;
|
||||
|
||||
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
|
||||
|
||||
size_t stack_offset = 0;
|
||||
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
|
||||
memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
|
||||
memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
|
||||
unsigned i = 0;
|
||||
for (; i < SC_NBINS; i++) {
|
||||
tcache->lg_fill_div[i] = 1;
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
/*
|
||||
* avail points past the available space. Allocations will
|
||||
* access the slots toward higher addresses (for the benefit of
|
||||
* prefetch).
|
||||
*/
|
||||
tcache_small_bin_get(tcache, i)->avail =
|
||||
(void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
|
||||
}
|
||||
for (; i < nhbins; i++) {
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
tcache_large_bin_get(tcache, i)->avail =
|
||||
(void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
|
||||
}
|
||||
assert(stack_offset == stack_nelms * sizeof(void *));
|
||||
}
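/*
 * Layout sketch (illustrative figures): the avail pointers computed above
 * carve one contiguous array of void * slots into per-bin chunks, with each
 * avail pointing one past the end of its chunk.  E.g. if the first two small
 * bins have ncached_max of 20 and 40, bin 0's avail points at slot 20 and
 * bin 1's at slot 60.  Cached objects then live at avail[-1] .. avail[-ncached]
 * (see the *(tbin->avail - 1 - i) reads in the flush paths), so each bin fills
 * downward inside its own chunk and chunks never overlap.
 */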
|
||||
|
||||
/* Initialize auto tcache (embedded in TSD). */
|
||||
bool
|
||||
tsd_tcache_data_init(tsd_t *tsd) {
|
||||
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
|
||||
size_t size = stack_nelms * sizeof(void *);
|
||||
/* Avoid false cacheline sharing. */
|
||||
size = sz_sa2u(size, CACHELINE);
|
||||
|
||||
void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
|
||||
NULL, true, arena_get(TSDN_NULL, 0, true));
|
||||
if (avail_array == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
tcache_init(tsd, tcache, avail_array);
|
||||
/*
|
||||
* Initialization is a bit tricky here. After malloc init is done, all
|
||||
* threads can rely on arena_choose and associate tcache accordingly.
|
||||
* However, the thread that does actual malloc bootstrapping relies on
|
||||
* functional tsd, and it can only rely on a0. In that case, we
|
||||
* associate its tcache to a0 temporarily, and later on
|
||||
* arena_choose_hard() will re-associate properly.
|
||||
*/
|
||||
tcache->arena = NULL;
|
||||
arena_t *arena;
|
||||
if (!malloc_initialized()) {
|
||||
/* If in initialization, assign to a0. */
|
||||
arena = arena_get(tsd_tsdn(tsd), 0, false);
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
|
||||
} else {
|
||||
arena = arena_choose(tsd, NULL);
|
||||
/* This may happen if thread.tcache.enabled is used. */
|
||||
if (tcache->arena == NULL) {
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
|
||||
}
|
||||
}
|
||||
assert(arena == tcache->arena);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Create a manual tcache, for use by the tcache.create mallctl. */
|
||||
tcache_t *
|
||||
tcache_create_explicit(tsd_t *tsd) {
|
||||
tcache_t *tcache;
|
||||
size_t size, stack_offset;
|
||||
|
||||
size = sizeof(tcache_t);
|
||||
/* Naturally align the pointer stacks. */
|
||||
size = PTR_CEILING(size);
|
||||
stack_offset = size;
|
||||
size += stack_nelms * sizeof(void *);
|
||||
/* Avoid false cacheline sharing. */
|
||||
size = sz_sa2u(size, CACHELINE);
|
||||
|
||||
tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
|
||||
arena_get(TSDN_NULL, 0, true));
|
||||
if (tcache == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tcache_init(tsd, tcache,
|
||||
(void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
|
||||
|
||||
return tcache;
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
|
||||
assert(tcache->arena != NULL);
|
||||
|
||||
for (unsigned i = 0; i < SC_NBINS; i++) {
|
||||
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
|
||||
tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
|
||||
|
||||
if (config_stats) {
|
||||
assert(tbin->tstats.nrequests == 0);
|
||||
}
|
||||
}
|
||||
for (unsigned i = SC_NBINS; i < nhbins; i++) {
|
||||
cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
|
||||
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
|
||||
|
||||
if (config_stats) {
|
||||
assert(tbin->tstats.nrequests == 0);
|
||||
}
|
||||
}
|
||||
|
||||
if (config_prof && tcache->prof_accumbytes > 0 &&
|
||||
arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
|
||||
tcache->prof_accumbytes)) {
|
||||
prof_idump(tsd_tsdn(tsd));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_flush(tsd_t *tsd) {
|
||||
assert(tcache_available(tsd));
|
||||
tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
|
||||
tcache_flush_cache(tsd, tcache);
|
||||
arena_t *arena = tcache->arena;
|
||||
tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
|
||||
|
||||
if (tsd_tcache) {
|
||||
/* Release the avail array for the TSD embedded auto tcache. */
|
||||
void *avail_array =
|
||||
(void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
|
||||
(uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
|
||||
idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
|
||||
} else {
|
||||
/* Release both the tcache struct and avail array. */
|
||||
idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* The deallocation and tcache flush above may not trigger decay since
|
||||
* we are on the tcache shutdown path (potentially with non-nominal
|
||||
* tsd). Manually trigger decay to avoid pathological cases. Also
|
||||
* include arena 0 because the tcache array is allocated from it.
|
||||
*/
|
||||
arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
|
||||
false, false);
|
||||
|
||||
if (arena_nthreads_get(arena, false) == 0 &&
|
||||
!background_thread_enabled()) {
|
||||
/* Force purging when no threads assigned to the arena anymore. */
|
||||
arena_decay(tsd_tsdn(tsd), arena, false, true);
|
||||
} else {
|
||||
arena_decay(tsd_tsdn(tsd), arena, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
/* For auto tcache (embedded in TSD) only. */
|
||||
void
|
||||
tcache_cleanup(tsd_t *tsd) {
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
if (!tcache_available(tsd)) {
|
||||
assert(tsd_tcache_enabled_get(tsd) == false);
|
||||
if (config_debug) {
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
assert(tsd_tcache_enabled_get(tsd));
|
||||
assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
|
||||
|
||||
tcache_destroy(tsd, tcache, true);
|
||||
if (config_debug) {
|
||||
tcache_small_bin_get(tcache, 0)->avail = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
||||
unsigned i;
|
||||
|
||||
cassert(config_stats);
|
||||
|
||||
/* Merge and reset tcache stats. */
|
||||
for (i = 0; i < SC_NBINS; i++) {
|
||||
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
|
||||
unsigned binshard;
|
||||
bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
|
||||
for (; i < nhbins; i++) {
|
||||
cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
|
||||
arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
|
||||
tbin->tstats.nrequests);
|
||||
tbin->tstats.nrequests = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
tcaches_create_prep(tsd_t *tsd) {
|
||||
bool err;
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
|
||||
if (tcaches == NULL) {
|
||||
tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
|
||||
* (MALLOCX_TCACHE_MAX+1), CACHELINE);
|
||||
if (tcaches == NULL) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
|
||||
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
err = false;
|
||||
label_return:
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
return err;
|
||||
}
|
||||
|
||||
bool
|
||||
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
|
||||
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
|
||||
|
||||
bool err;
|
||||
|
||||
if (tcaches_create_prep(tsd)) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
tcache_t *tcache = tcache_create_explicit(tsd);
|
||||
if (tcache == NULL) {
|
||||
err = true;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
tcaches_t *elm;
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcaches_avail != NULL) {
|
||||
elm = tcaches_avail;
|
||||
tcaches_avail = tcaches_avail->next;
|
||||
elm->tcache = tcache;
|
||||
*r_ind = (unsigned)(elm - tcaches);
|
||||
} else {
|
||||
elm = &tcaches[tcaches_past];
|
||||
elm->tcache = tcache;
|
||||
*r_ind = tcaches_past;
|
||||
tcaches_past++;
|
||||
}
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
|
||||
err = false;
|
||||
label_return:
|
||||
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static tcache_t *
|
||||
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
|
||||
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
|
||||
if (elm->tcache == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
tcache_t *tcache = elm->tcache;
|
||||
if (allow_reinit) {
|
||||
elm->tcache = TCACHES_ELM_NEED_REINIT;
|
||||
} else {
|
||||
elm->tcache = NULL;
|
||||
}
|
||||
|
||||
if (tcache == TCACHES_ELM_NEED_REINIT) {
|
||||
return NULL;
|
||||
}
|
||||
return tcache;
|
||||
}
|
||||
|
||||
void
|
||||
tcaches_flush(tsd_t *tsd, unsigned ind) {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcache != NULL) {
|
||||
/* Destroy the tcache; recreate in tcaches_get() if needed. */
|
||||
tcache_destroy(tsd, tcache, false);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcaches_destroy(tsd_t *tsd, unsigned ind) {
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
tcaches_t *elm = &tcaches[ind];
|
||||
tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
|
||||
elm->next = tcaches_avail;
|
||||
tcaches_avail = elm;
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
|
||||
if (tcache != NULL) {
|
||||
tcache_destroy(tsd, tcache, false);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
tcache_boot(tsdn_t *tsdn) {
|
||||
/* If necessary, clamp opt_lg_tcache_max. */
|
||||
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
|
||||
SC_SMALL_MAXCLASS) {
|
||||
tcache_maxclass = SC_SMALL_MAXCLASS;
|
||||
} else {
|
||||
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
|
||||
}
|
||||
|
||||
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
nhbins = sz_size2index(tcache_maxclass) + 1;
|
||||
|
||||
/* Initialize tcache_bin_info. */
|
||||
tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
|
||||
* sizeof(cache_bin_info_t), CACHELINE);
|
||||
if (tcache_bin_info == NULL) {
|
||||
return true;
|
||||
}
|
||||
stack_nelms = 0;
|
||||
unsigned i;
|
||||
for (i = 0; i < SC_NBINS; i++) {
|
||||
if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
|
||||
tcache_bin_info[i].ncached_max =
|
||||
TCACHE_NSLOTS_SMALL_MIN;
|
||||
} else if ((bin_infos[i].nregs << 1) <=
|
||||
TCACHE_NSLOTS_SMALL_MAX) {
|
||||
tcache_bin_info[i].ncached_max =
|
||||
(bin_infos[i].nregs << 1);
|
||||
} else {
|
||||
tcache_bin_info[i].ncached_max =
|
||||
TCACHE_NSLOTS_SMALL_MAX;
|
||||
}
|
||||
stack_nelms += tcache_bin_info[i].ncached_max;
|
||||
}
|
||||
for (; i < nhbins; i++) {
|
||||
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
|
||||
stack_nelms += tcache_bin_info[i].ncached_max;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
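/*
 * Worked example (illustrative; assumes the stock values
 * TCACHE_NSLOTS_SMALL_MIN == 20 and TCACHE_NSLOTS_SMALL_MAX == 200): a small
 * bin whose slab packs 512 regions would want 2 * 512 slots and is clamped to
 * 200; a bin with 4 regions per slab would want 8 and is raised to 20; any bin
 * in between simply caches twice the regions of one slab.
 */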
|
||||
|
||||
void
|
||||
tcache_prefork(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_prefork(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_postfork_parent(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_postfork_child(tsdn_t *tsdn) {
|
||||
if (!config_prof && opt_tcache) {
|
||||
malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
|
||||
}
|
||||
}
|
||||
12
deps/jemalloc/src/test_hooks.c
vendored
Normal file
12
deps/jemalloc/src/test_hooks.c
vendored
Normal file
@@ -0,0 +1,12 @@
#include "jemalloc/internal/jemalloc_preamble.h"

/*
 * The hooks are a little bit screwy -- they're not genuinely exported in the
 * sense that we want them available to end-users, but we do want them visible
 * from outside the generated library, so that we can use them in test code.
 */
JEMALLOC_EXPORT
void (*test_hooks_arena_new_hook)() = NULL;

JEMALLOC_EXPORT
void (*test_hooks_libc_hook)() = NULL;
3
deps/jemalloc/src/ticker.c
vendored
Normal file
3
deps/jemalloc/src/ticker.c
vendored
Normal file
@@ -0,0 +1,3 @@
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
534
deps/jemalloc/src/tsd.c
vendored
Normal file
534
deps/jemalloc/src/tsd.c
vendored
Normal file
@@ -0,0 +1,534 @@
|
||||
#define JEMALLOC_TSD_C_
|
||||
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
static unsigned ncleanups;
|
||||
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
|
||||
|
||||
/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
|
||||
JEMALLOC_DIAGNOSTIC_PUSH
|
||||
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
|
||||
|
||||
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
|
||||
JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
|
||||
JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false;
|
||||
bool tsd_booted = false;
|
||||
#elif (defined(JEMALLOC_TLS))
|
||||
JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
|
||||
pthread_key_t tsd_tsd;
|
||||
bool tsd_booted = false;
|
||||
#elif (defined(_WIN32))
|
||||
DWORD tsd_tsd;
|
||||
tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
|
||||
bool tsd_booted = false;
|
||||
#else
|
||||
|
||||
/*
|
||||
* This contains a mutex, but it's pretty convenient to allow the mutex code to
|
||||
* have a dependency on tsd. So we define the struct here, and only refer to it
|
||||
* by pointer in the header.
|
||||
*/
|
||||
struct tsd_init_head_s {
|
||||
ql_head(tsd_init_block_t) blocks;
|
||||
malloc_mutex_t lock;
|
||||
};
|
||||
|
||||
pthread_key_t tsd_tsd;
|
||||
tsd_init_head_t tsd_init_head = {
|
||||
ql_head_initializer(blocks),
|
||||
MALLOC_MUTEX_INITIALIZER
|
||||
};
|
||||
|
||||
tsd_wrapper_t tsd_boot_wrapper = {
|
||||
false,
|
||||
TSD_INITIALIZER
|
||||
};
|
||||
bool tsd_booted = false;
|
||||
#endif
|
||||
|
||||
JEMALLOC_DIAGNOSTIC_POP
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
/* A list of all the tsds in the nominal state. */
|
||||
typedef ql_head(tsd_t) tsd_list_t;
|
||||
static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
|
||||
static malloc_mutex_t tsd_nominal_tsds_lock;
|
||||
|
||||
/* How many slow-path-enabling features are turned on. */
|
||||
static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
|
||||
|
||||
static bool
|
||||
tsd_in_nominal_list(tsd_t *tsd) {
|
||||
tsd_t *tsd_list;
|
||||
bool found = false;
|
||||
/*
|
||||
* We don't know that tsd is nominal; it might not be safe to get data
|
||||
* out of it here.
|
||||
*/
|
||||
malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
|
||||
ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
|
||||
if (tsd == tsd_list) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
|
||||
return found;
|
||||
}
|
||||
|
||||
static void
|
||||
tsd_add_nominal(tsd_t *tsd) {
|
||||
assert(!tsd_in_nominal_list(tsd));
|
||||
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
|
||||
ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
tsd_remove_nominal(tsd_t *tsd) {
|
||||
assert(tsd_in_nominal_list(tsd));
|
||||
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
tsd_force_recompute(tsdn_t *tsdn) {
|
||||
/*
|
||||
* The stores to tsd->state here need to synchronize with the exchange
|
||||
* in tsd_slow_update.
|
||||
*/
|
||||
atomic_fence(ATOMIC_RELEASE);
|
||||
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
|
||||
tsd_t *remote_tsd;
|
||||
ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
|
||||
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
|
||||
<= tsd_state_nominal_max);
|
||||
tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute,
|
||||
ATOMIC_RELAXED);
|
||||
}
|
||||
malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
|
||||
}
|
||||
|
||||
void
|
||||
tsd_global_slow_inc(tsdn_t *tsdn) {
|
||||
atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
|
||||
/*
|
||||
* We unconditionally force a recompute, even if the global slow count
|
||||
* was already positive. If we didn't, then it would be possible for us
|
||||
* to return to the user, have the user synchronize externally with some
|
||||
* other thread, and then have that other thread not have picked up the
|
||||
* update yet (since the original incrementing thread might still be
|
||||
* making its way through the tsd list).
|
||||
*/
|
||||
tsd_force_recompute(tsdn);
|
||||
}
|
||||
|
||||
void tsd_global_slow_dec(tsdn_t *tsdn) {
|
||||
atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
|
||||
/* See the note in ..._inc(). */
|
||||
tsd_force_recompute(tsdn);
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_local_slow(tsd_t *tsd) {
|
||||
return !tsd_tcache_enabled_get(tsd)
|
||||
|| tsd_reentrancy_level_get(tsd) > 0;
|
||||
}
|
||||
|
||||
bool
|
||||
tsd_global_slow() {
|
||||
return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static uint8_t
|
||||
tsd_state_compute(tsd_t *tsd) {
|
||||
if (!tsd_nominal(tsd)) {
|
||||
return tsd_state_get(tsd);
|
||||
}
|
||||
/* We're in *a* nominal state; but which one? */
|
||||
if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
|
||||
return tsd_state_nominal_slow;
|
||||
} else {
|
||||
return tsd_state_nominal;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tsd_slow_update(tsd_t *tsd) {
|
||||
uint8_t old_state;
|
||||
do {
|
||||
uint8_t new_state = tsd_state_compute(tsd);
|
||||
old_state = tsd_atomic_exchange(&tsd->state, new_state,
|
||||
ATOMIC_ACQUIRE);
|
||||
} while (old_state == tsd_state_nominal_recompute);
|
||||
}
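/*
 * Why the exchange runs in a loop: tsd_force_recompute() may mark this tsd as
 * tsd_state_nominal_recompute at any point while new_state is being computed.
 * The atomic exchange surfaces that marker as old_state, in which case the
 * value just installed may already be stale, so we recompute and try again
 * until an install happens without observing the marker.
 */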
|
||||
|
||||
void
|
||||
tsd_state_set(tsd_t *tsd, uint8_t new_state) {
|
||||
/* Only the tsd module can change the state *to* recompute. */
|
||||
assert(new_state != tsd_state_nominal_recompute);
|
||||
uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
|
||||
if (old_state > tsd_state_nominal_max) {
|
||||
/*
|
||||
* Not currently in the nominal list, but it might need to be
|
||||
* inserted there.
|
||||
*/
|
||||
assert(!tsd_in_nominal_list(tsd));
|
||||
tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
|
||||
if (new_state <= tsd_state_nominal_max) {
|
||||
tsd_add_nominal(tsd);
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* We're currently nominal. If the new state is non-nominal,
|
||||
* great; we take ourselves off the list and just enter the new
|
||||
* state.
|
||||
*/
|
||||
assert(tsd_in_nominal_list(tsd));
|
||||
if (new_state > tsd_state_nominal_max) {
|
||||
tsd_remove_nominal(tsd);
|
||||
tsd_atomic_store(&tsd->state, new_state,
|
||||
ATOMIC_RELAXED);
|
||||
} else {
|
||||
/*
|
||||
* This is the tricky case. We're transitioning from
|
||||
* one nominal state to another. The caller can't know
|
||||
* about any races that are occurring at the same time,
|
||||
* so we always have to recompute no matter what.
|
||||
*/
|
||||
tsd_slow_update(tsd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_data_init(tsd_t *tsd) {
|
||||
/*
|
||||
* We initialize the rtree context first (before the tcache), since the
|
||||
* tcache initialization depends on it.
|
||||
*/
|
||||
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
|
||||
|
||||
/*
|
||||
* A nondeterministic seed based on the address of tsd reduces
|
||||
* the likelihood of lockstep non-uniform cache index
|
||||
* utilization among identical concurrent processes, but at the
|
||||
* cost of test repeatability. For debug builds, instead use a
|
||||
* deterministic seed.
|
||||
*/
|
||||
*tsd_offset_statep_get(tsd) = config_debug ? 0 :
|
||||
(uint64_t)(uintptr_t)tsd;
|
||||
|
||||
return tsd_tcache_enabled_data_init(tsd);
|
||||
}
|
||||
|
||||
static void
|
||||
assert_tsd_data_cleanup_done(tsd_t *tsd) {
|
||||
assert(!tsd_nominal(tsd));
|
||||
assert(!tsd_in_nominal_list(tsd));
|
||||
assert(*tsd_arenap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
|
||||
assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
|
||||
assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
|
||||
assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_data_init_nocleanup(tsd_t *tsd) {
|
||||
assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
|
||||
tsd_state_get(tsd) == tsd_state_minimal_initialized);
|
||||
/*
|
||||
* During reincarnation, there is no guarantee that the cleanup function
|
||||
* will be called (deallocation may happen after all tsd destructors).
|
||||
* We set up tsd in a way that no cleanup is needed.
|
||||
*/
|
||||
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = true;
|
||||
*tsd_tcache_enabledp_get_unsafe(tsd) = false;
|
||||
*tsd_reentrancy_levelp_get(tsd) = 1;
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
tsd_t *
|
||||
tsd_fetch_slow(tsd_t *tsd, bool minimal) {
|
||||
assert(!tsd_fast(tsd));
|
||||
|
||||
if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
|
||||
/*
|
||||
* On slow path but no work needed. Note that we can't
|
||||
* necessarily *assert* that we're slow, because we might be
|
||||
* slow because of an asynchronous modification to global state,
|
||||
* which might be asynchronously modified *back*.
|
||||
*/
|
||||
} else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) {
|
||||
tsd_slow_update(tsd);
|
||||
} else if (tsd_state_get(tsd) == tsd_state_uninitialized) {
|
||||
if (!minimal) {
|
||||
if (tsd_booted) {
|
||||
tsd_state_set(tsd, tsd_state_nominal);
|
||||
tsd_slow_update(tsd);
|
||||
/* Trigger cleanup handler registration. */
|
||||
tsd_set(tsd);
|
||||
tsd_data_init(tsd);
|
||||
}
|
||||
} else {
|
||||
tsd_state_set(tsd, tsd_state_minimal_initialized);
|
||||
tsd_set(tsd);
|
||||
tsd_data_init_nocleanup(tsd);
|
||||
}
|
||||
} else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) {
|
||||
if (!minimal) {
|
||||
/* Switch to fully initialized. */
|
||||
tsd_state_set(tsd, tsd_state_nominal);
|
||||
assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
|
||||
(*tsd_reentrancy_levelp_get(tsd))--;
|
||||
tsd_slow_update(tsd);
|
||||
tsd_data_init(tsd);
|
||||
} else {
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
}
|
||||
} else if (tsd_state_get(tsd) == tsd_state_purgatory) {
|
||||
tsd_state_set(tsd, tsd_state_reincarnated);
|
||||
tsd_set(tsd);
|
||||
tsd_data_init_nocleanup(tsd);
|
||||
} else {
|
||||
assert(tsd_state_get(tsd) == tsd_state_reincarnated);
|
||||
}
|
||||
|
||||
return tsd;
|
||||
}
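/*
 * Summary of the transitions handled above: an uninitialized tsd becomes
 * either nominal (full init, cleanup registered) or minimal_initialized (no
 * cleanup); minimal_initialized may later be upgraded to nominal; purgatory
 * (a thread whose destructor already ran) is reincarnated with a no-cleanup
 * init; nominal_slow and reincarnated need no work beyond possibly refreshing
 * the slow-path state.
 */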
|
||||
|
||||
void *
|
||||
malloc_tsd_malloc(size_t size) {
|
||||
return a0malloc(CACHELINE_CEILING(size));
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_dalloc(void *wrapper) {
|
||||
a0dalloc(wrapper);
|
||||
}
|
||||
|
||||
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
|
||||
#ifndef _WIN32
|
||||
JEMALLOC_EXPORT
|
||||
#endif
|
||||
void
|
||||
_malloc_thread_cleanup(void) {
|
||||
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ncleanups; i++) {
|
||||
pending[i] = true;
|
||||
}
|
||||
|
||||
do {
|
||||
again = false;
|
||||
for (i = 0; i < ncleanups; i++) {
|
||||
if (pending[i]) {
|
||||
pending[i] = cleanups[i]();
|
||||
if (pending[i]) {
|
||||
again = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (again);
|
||||
}
|
||||
#endif
|
||||
|
||||
void
|
||||
malloc_tsd_cleanup_register(bool (*f)(void)) {
|
||||
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
|
||||
cleanups[ncleanups] = f;
|
||||
ncleanups++;
|
||||
}
|
||||
|
||||
static void
|
||||
tsd_do_data_cleanup(tsd_t *tsd) {
|
||||
prof_tdata_cleanup(tsd);
|
||||
iarena_cleanup(tsd);
|
||||
arena_cleanup(tsd);
|
||||
arenas_tdata_cleanup(tsd);
|
||||
tcache_cleanup(tsd);
|
||||
witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
|
||||
}
|
||||
|
||||
void
|
||||
tsd_cleanup(void *arg) {
|
||||
tsd_t *tsd = (tsd_t *)arg;
|
||||
|
||||
switch (tsd_state_get(tsd)) {
|
||||
case tsd_state_uninitialized:
|
||||
/* Do nothing. */
|
||||
break;
|
||||
case tsd_state_minimal_initialized:
|
||||
/* This implies the thread only did free() in its life time. */
|
||||
/* Fall through. */
|
||||
case tsd_state_reincarnated:
|
||||
/*
|
||||
* Reincarnated means another destructor deallocated memory
|
||||
* after the destructor was called. Cleanup isn't required but
|
||||
* is still called for testing and completeness.
|
||||
*/
|
||||
assert_tsd_data_cleanup_done(tsd);
|
||||
/* Fall through. */
|
||||
case tsd_state_nominal:
|
||||
case tsd_state_nominal_slow:
|
||||
tsd_do_data_cleanup(tsd);
|
||||
tsd_state_set(tsd, tsd_state_purgatory);
|
||||
tsd_set(tsd);
|
||||
break;
|
||||
case tsd_state_purgatory:
|
||||
/*
|
||||
* The previous time this destructor was called, we set the
|
||||
* state to tsd_state_purgatory so that other destructors
|
||||
* wouldn't cause re-creation of the tsd. This time, do
|
||||
* nothing, and do not request another callback.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
not_reached();
|
||||
}
|
||||
#ifdef JEMALLOC_JET
|
||||
test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
|
||||
int *data = tsd_test_datap_get_unsafe(tsd);
|
||||
if (test_callback != NULL) {
|
||||
test_callback(data);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
tsd_t *
|
||||
malloc_tsd_boot0(void) {
|
||||
tsd_t *tsd;
|
||||
|
||||
ncleanups = 0;
|
||||
if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
|
||||
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
|
||||
return NULL;
|
||||
}
|
||||
if (tsd_boot0()) {
|
||||
return NULL;
|
||||
}
|
||||
tsd = tsd_fetch();
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = true;
|
||||
return tsd;
|
||||
}
|
||||
|
||||
void
|
||||
malloc_tsd_boot1(void) {
|
||||
tsd_boot1();
|
||||
tsd_t *tsd = tsd_fetch();
|
||||
/* malloc_slow has been set properly. Update tsd_slow. */
|
||||
tsd_slow_update(tsd);
|
||||
*tsd_arenas_tdata_bypassp_get(tsd) = false;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
static BOOL WINAPI
|
||||
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
|
||||
switch (fdwReason) {
|
||||
#ifdef JEMALLOC_LAZY_LOCK
|
||||
case DLL_THREAD_ATTACH:
|
||||
isthreaded = true;
|
||||
break;
|
||||
#endif
|
||||
case DLL_THREAD_DETACH:
|
||||
_malloc_thread_cleanup();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to be able to say "read" here (in the "pragma section"), but have
|
||||
* hooked "read". We won't read for the rest of the file, so we can get away
|
||||
* with unhooking.
|
||||
*/
|
||||
#ifdef read
|
||||
# undef read
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# ifdef _M_IX86
|
||||
# pragma comment(linker, "/INCLUDE:__tls_used")
|
||||
# pragma comment(linker, "/INCLUDE:_tls_callback")
|
||||
# else
|
||||
# pragma comment(linker, "/INCLUDE:_tls_used")
|
||||
# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
|
||||
# endif
|
||||
# pragma section(".CRT$XLY",long,read)
|
||||
#endif
|
||||
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
|
||||
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
|
||||
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
|
||||
#endif
|
||||
|
||||
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
|
||||
!defined(_WIN32))
|
||||
void *
|
||||
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
|
||||
pthread_t self = pthread_self();
|
||||
tsd_init_block_t *iter;
|
||||
|
||||
/* Check whether this thread has already inserted into the list. */
|
||||
malloc_mutex_lock(TSDN_NULL, &head->lock);
|
||||
ql_foreach(iter, &head->blocks, link) {
|
||||
if (iter->thread == self) {
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
return iter->data;
|
||||
}
|
||||
}
|
||||
/* Insert block into list. */
|
||||
ql_elm_new(block, link);
|
||||
block->thread = self;
|
||||
ql_tail_insert(&head->blocks, block, link);
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
|
||||
malloc_mutex_lock(TSDN_NULL, &head->lock);
|
||||
ql_remove(&head->blocks, block, link);
|
||||
malloc_mutex_unlock(TSDN_NULL, &head->lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
void
|
||||
tsd_prefork(tsd_t *tsd) {
|
||||
malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
}
|
||||
|
||||
void
|
||||
tsd_postfork_parent(tsd_t *tsd) {
|
||||
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
}
|
||||
|
||||
void
|
||||
tsd_postfork_child(tsd_t *tsd) {
|
||||
malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
|
||||
ql_new(&tsd_nominal_tsds);
|
||||
|
||||
if (tsd_state_get(tsd) <= tsd_state_nominal_max) {
|
||||
tsd_add_nominal(tsd);
|
||||
}
|
||||
}
|
||||
648
deps/jemalloc/src/util.c
vendored
Normal file
648
deps/jemalloc/src/util.c
vendored
Normal file
@@ -0,0 +1,648 @@
|
||||
#define assert(e) do { \
|
||||
if (config_debug && !(e)) { \
|
||||
malloc_write("<jemalloc>: Failed assertion\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define not_reached() do { \
|
||||
if (config_debug) { \
|
||||
malloc_write("<jemalloc>: Unreachable code reached\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define not_implemented() do { \
|
||||
if (config_debug) { \
|
||||
malloc_write("<jemalloc>: Not implemented\n"); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define JEMALLOC_UTIL_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void wrtmessage(void *cbopaque, const char *s);
|
||||
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
    size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
    size_t *slen_p);

/******************************************************************************/

/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s)
{

#ifdef SYS_write
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc. This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     */
    UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
    UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}

JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);

/*
 * Wrapper around malloc_message() that avoids the need for
 * je_malloc_message(...) throughout the code.
 */
void
malloc_write(const char *s)
{

    if (je_malloc_message != NULL)
        je_malloc_message(NULL, s);
    else
        wrtmessage(NULL, s);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int err, char *buf, size_t buflen)
{

#ifdef _WIN32
    FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
        (LPSTR)buf, buflen, NULL);
    return (0);
#elif defined(_GNU_SOURCE)
    char *b = strerror_r(err, buf, buflen);
    if (b != buf) {
        strncpy(buf, b, buflen);
        buf[buflen-1] = '\0';
    }
    return (0);
#else
    return (strerror_r(err, buf, buflen));
#endif
}

uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
    uintmax_t ret, digit;
    int b;
    bool neg;
    const char *p, *ns;

    p = nptr;
    if (base < 0 || base == 1 || base > 36) {
        ns = p;
        set_errno(EINVAL);
        ret = UINTMAX_MAX;
        goto label_return;
    }
    b = base;

    /* Swallow leading whitespace and get sign, if any. */
    neg = false;
    while (true) {
        switch (*p) {
        case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
            p++;
            break;
        case '-':
            neg = true;
            /* Fall through. */
        case '+':
            p++;
            /* Fall through. */
        default:
            goto label_prefix;
        }
    }

    /* Get prefix, if any. */
    label_prefix:
    /*
     * Note where the first non-whitespace/sign character is so that it is
     * possible to tell whether any digits are consumed (e.g., " 0" vs.
     * " -x").
     */
    ns = p;
    if (*p == '0') {
        switch (p[1]) {
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7':
            if (b == 0)
                b = 8;
            if (b == 8)
                p++;
            break;
        case 'X': case 'x':
            switch (p[2]) {
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9':
            case 'A': case 'B': case 'C': case 'D': case 'E':
            case 'F':
            case 'a': case 'b': case 'c': case 'd': case 'e':
            case 'f':
                if (b == 0)
                    b = 16;
                if (b == 16)
                    p += 2;
                break;
            default:
                break;
            }
            break;
        default:
            p++;
            ret = 0;
            goto label_return;
        }
    }
    if (b == 0)
        b = 10;

    /* Convert. */
    ret = 0;
    while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
        || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
        || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
        uintmax_t pret = ret;
        ret *= b;
        ret += digit;
        if (ret < pret) {
            /* Overflow. */
            set_errno(ERANGE);
            ret = UINTMAX_MAX;
            goto label_return;
        }
        p++;
    }
    if (neg)
        ret = -ret;

    if (p == ns) {
        /* No conversion performed. */
        set_errno(EINVAL);
        ret = UINTMAX_MAX;
        goto label_return;
    }

label_return:
    if (endptr != NULL) {
        if (p == ns) {
            /* No characters were converted. */
            *endptr = (char *)nptr;
        } else
            *endptr = (char *)p;
    }
    return (ret);
}
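
/*
 * Illustrative sketch, not part of the vendored file: malloc_strtoumax()
 * behaves like strtoumax(3) while avoiding allocation inside libc. With
 * base == 0 a "0x"/"0X" or leading "0" prefix selects the base, and endptr
 * reports how far parsing got. The helper below is invented purely to show
 * the calling convention.
 */
static void
example_strtoumax_usage(void)
{
    char *end;
    uintmax_t val = malloc_strtoumax("4096k", &end, 0);

    /* Here val == 4096 and end points at the unparsed 'k'. */
    (void)val;
    (void)end;
}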

static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
{
    unsigned i;

    i = U2S_BUFSIZE - 1;
    s[i] = '\0';
    switch (base) {
    case 10:
        do {
            i--;
            s[i] = "0123456789"[x % (uint64_t)10];
            x /= (uint64_t)10;
        } while (x > 0);
        break;
    case 16: {
        const char *digits = (uppercase)
            ? "0123456789ABCDEF"
            : "0123456789abcdef";

        do {
            i--;
            s[i] = digits[x & 0xf];
            x >>= 4;
        } while (x > 0);
        break;
    } default: {
        const char *digits = (uppercase)
            ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            : "0123456789abcdefghijklmnopqrstuvwxyz";

        assert(base >= 2 && base <= 36);
        do {
            i--;
            s[i] = digits[x % (uint64_t)base];
            x /= (uint64_t)base;
        } while (x > 0);
    }}

    *slen_p = U2S_BUFSIZE - 1 - i;
    return (&s[i]);
}

static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p)
{
    bool neg;

    if ((neg = (x < 0)))
        x = -x;
    s = u2s(x, 10, false, s, slen_p);
    if (neg)
        sign = '-';
    switch (sign) {
    case '-':
        if (neg == false)
            break;
        /* Fall through. */
    case ' ':
    case '+':
        s--;
        (*slen_p)++;
        *s = sign;
        break;
    default: not_reached();
    }
    return (s);
}

static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
{

    s = u2s(x, 8, false, s, slen_p);
    if (alt_form && *s != '0') {
        s--;
        (*slen_p)++;
        *s = '0';
    }
    return (s);
}

static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
{

    s = u2s(x, 16, uppercase, s, slen_p);
    if (alt_form) {
        s -= 2;
        (*slen_p) += 2;
        memcpy(s, uppercase ? "0X" : "0x", 2);
    }
    return (s);
}

int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
    int ret;
    size_t i;
    const char *f;

#define APPEND_C(c) do { \
    if (i < size) \
        str[i] = (c); \
    i++; \
} while (0)
#define APPEND_S(s, slen) do { \
    if (i < size) { \
        size_t cpylen = (slen <= size - i) ? slen : size - i; \
        memcpy(&str[i], s, cpylen); \
    } \
    i += slen; \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
    /* Left padding. */ \
    size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
        (size_t)width - slen : 0); \
    if (left_justify == false && pad_len != 0) { \
        size_t j; \
        for (j = 0; j < pad_len; j++) \
            APPEND_C(' '); \
    } \
    /* Value. */ \
    APPEND_S(s, slen); \
    /* Right padding. */ \
    if (left_justify && pad_len != 0) { \
        size_t j; \
        for (j = 0; j < pad_len; j++) \
            APPEND_C(' '); \
    } \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
    switch (len) { \
    case '?': \
        val = va_arg(ap, int); \
        break; \
    case '?' | 0x80: \
        val = va_arg(ap, unsigned int); \
        break; \
    case 'l': \
        val = va_arg(ap, long); \
        break; \
    case 'l' | 0x80: \
        val = va_arg(ap, unsigned long); \
        break; \
    case 'q': \
        val = va_arg(ap, long long); \
        break; \
    case 'q' | 0x80: \
        val = va_arg(ap, unsigned long long); \
        break; \
    case 'j': \
        val = va_arg(ap, intmax_t); \
        break; \
    case 'j' | 0x80: \
        val = va_arg(ap, uintmax_t); \
        break; \
    case 't': \
        val = va_arg(ap, ptrdiff_t); \
        break; \
    case 'z': \
        val = va_arg(ap, ssize_t); \
        break; \
    case 'z' | 0x80: \
        val = va_arg(ap, size_t); \
        break; \
    case 'p': /* Synthetic; used for %p. */ \
        val = va_arg(ap, uintptr_t); \
        break; \
    default: not_reached(); \
    } \
} while (0)
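
    /*
     * Descriptive note: GET_ARG_NUMERIC() keys off the length modifier
     * recorded in `len` ('?' for none, 'l', 'q', 'j', 't', 'z', or the
     * synthetic 'p' used by %p), and the 0x80 bit marks the unsigned
     * variant. The %o/%u/%x/%X handlers below therefore pass
     * `len | 0x80`, while %d/%i pass `len` unchanged.
     */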

    i = 0;
    f = format;
    while (true) {
        switch (*f) {
        case '\0': goto label_out;
        case '%': {
            bool alt_form = false;
            bool left_justify = false;
            bool plus_space = false;
            bool plus_plus = false;
            int prec = -1;
            int width = -1;
            unsigned char len = '?';

            f++;
            /* Flags. */
            while (true) {
                switch (*f) {
                case '#':
                    assert(alt_form == false);
                    alt_form = true;
                    break;
                case '-':
                    assert(left_justify == false);
                    left_justify = true;
                    break;
                case ' ':
                    assert(plus_space == false);
                    plus_space = true;
                    break;
                case '+':
                    assert(plus_plus == false);
                    plus_plus = true;
                    break;
                default: goto label_width;
                }
                f++;
            }
            /* Width. */
            label_width:
            switch (*f) {
            case '*':
                width = va_arg(ap, int);
                f++;
                if (width < 0) {
                    left_justify = true;
                    width = -width;
                }
                break;
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9': {
                uintmax_t uwidth;
                set_errno(0);
                uwidth = malloc_strtoumax(f, (char **)&f, 10);
                assert(uwidth != UINTMAX_MAX || get_errno() !=
                    ERANGE);
                width = (int)uwidth;
                break;
            } default:
                break;
            }
            /* Width/precision separator. */
            if (*f == '.')
                f++;
            else
                goto label_length;
            /* Precision. */
            switch (*f) {
            case '*':
                prec = va_arg(ap, int);
                f++;
                break;
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9': {
                uintmax_t uprec;
                set_errno(0);
                uprec = malloc_strtoumax(f, (char **)&f, 10);
                assert(uprec != UINTMAX_MAX || get_errno() !=
                    ERANGE);
                prec = (int)uprec;
                break;
            }
            default: break;
            }
            /* Length. */
            label_length:
            switch (*f) {
            case 'l':
                f++;
                if (*f == 'l') {
                    len = 'q';
                    f++;
                } else
                    len = 'l';
                break;
            case 'q': case 'j': case 't': case 'z':
                len = *f;
                f++;
                break;
            default: break;
            }
            /* Conversion specifier. */
            switch (*f) {
            char *s;
            size_t slen;
            case '%':
                /* %% */
                APPEND_C(*f);
                f++;
                break;
            case 'd': case 'i': {
                intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[D2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len);
                s = d2s(val, (plus_plus ? '+' : (plus_space ?
                    ' ' : '-')), buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'o': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[O2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = o2s(val, alt_form, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'u': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[U2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = u2s(val, 10, false, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'x': case 'X': {
                uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
                char buf[X2S_BUFSIZE];

                GET_ARG_NUMERIC(val, len | 0x80);
                s = x2s(val, alt_form, *f == 'X', buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } case 'c': {
                unsigned char val;
                char buf[2];

                assert(len == '?' || len == 'l');
                assert_not_implemented(len != 'l');
                val = va_arg(ap, int);
                buf[0] = val;
                buf[1] = '\0';
                APPEND_PADDED_S(buf, 1, width, left_justify);
                f++;
                break;
            } case 's':
                assert(len == '?' || len == 'l');
                assert_not_implemented(len != 'l');
                s = va_arg(ap, char *);
                slen = (prec < 0) ? strlen(s) : prec;
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            case 'p': {
                uintmax_t val;
                char buf[X2S_BUFSIZE];

                GET_ARG_NUMERIC(val, 'p');
                s = x2s(val, true, false, buf, &slen);
                APPEND_PADDED_S(s, slen, width, left_justify);
                f++;
                break;
            } default: not_reached();
            }
            break;
        } default: {
            APPEND_C(*f);
            f++;
            break;
        }}
    }
    label_out:
    if (i < size)
        str[i] = '\0';
    else
        str[size - 1] = '\0';
    ret = i;

#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
    return (ret);
}

JEMALLOC_ATTR(format(printf, 3, 4))
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
    int ret;
    va_list ap;

    va_start(ap, format);
    ret = malloc_vsnprintf(str, size, format, ap);
    va_end(ap);

    return (ret);
}
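
/*
 * Illustrative sketch, not part of the vendored file: malloc_snprintf()
 * accepts the conversions implemented above (%d/%i, %o, %u, %x/%X, %c, %s,
 * %p, plus the '#', '-', ' ', '+' flags, width, precision, and the
 * l/ll/q/j/t/z length modifiers) and, like snprintf(3), returns the length
 * the output would have had without truncation. The helper name is invented.
 */
static void
example_snprintf_usage(size_t nbytes, const void *ptr)
{
    char buf[64];
    int len = malloc_snprintf(buf, sizeof(buf), "%zu bytes at %p", nbytes,
        ptr);

    /* A return value >= sizeof(buf) means the output was truncated. */
    (void)len;
    malloc_write(buf);
}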

void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap)
{
    char buf[MALLOC_PRINTF_BUFSIZE];

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one. malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = (je_malloc_message != NULL) ? je_malloc_message :
            wrtmessage;
        cbopaque = NULL;
    }

    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    write_cb(cbopaque, buf);
}

/*
 * Print to a callback function in such a way as to (hopefully) avoid memory
 * allocation.
 */
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(write_cb, cbopaque, format, ap);
    va_end(ap);
}

/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(NULL, NULL, format, ap);
    va_end(ap);
}
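
/*
 * Illustrative sketch, not part of the vendored file: malloc_cprintf()
 * routes formatted output through a caller-supplied callback and falls back
 * to je_malloc_message()/wrtmessage() when the callback is NULL. The sink
 * type and helper names below are invented for the example; strlen()/memcpy()
 * are already available through the headers this file includes.
 */
typedef struct {
    char *buf;
    size_t len;
    size_t cap;
} example_sink_t;

static void
example_sink_write(void *cbopaque, const char *s)
{
    example_sink_t *sink = (example_sink_t *)cbopaque;
    size_t avail, slen;

    if (sink->len + 1 >= sink->cap)
        return; /* Sink already full. */
    avail = sink->cap - sink->len - 1;
    slen = strlen(s);
    if (slen > avail)
        slen = avail;
    memcpy(sink->buf + sink->len, s, slen);
    sink->len += slen;
    sink->buf[sink->len] = '\0';
}

static void
example_report(example_sink_t *sink, size_t allocated)
{
    malloc_cprintf(example_sink_write, sink, "allocated: %zu\n", allocated);
}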
100
deps/jemalloc/src/witness.c
vendored
Normal file
100
deps/jemalloc/src/witness.c
vendored
Normal file
@@ -0,0 +1,100 @@
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
    witness_comp_t *comp, void *opaque) {
    witness->name = name;
    witness->rank = rank;
    witness->comp = comp;
    witness->opaque = opaque;
}

static void
witness_lock_error_impl(const witness_list_t *witnesses,
    const witness_t *witness) {
    witness_t *w;

    malloc_printf("<jemalloc>: Lock rank order reversal:");
    ql_foreach(w, witnesses, link) {
        malloc_printf(" %s(%u)", w->name, w->rank);
    }
    malloc_printf(" %s(%u)\n", witness->name, witness->rank);
    abort();
}
witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;

static void
witness_owner_error_impl(const witness_t *witness) {
    malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
        witness->rank);
    abort();
}
witness_owner_error_t *JET_MUTABLE witness_owner_error =
    witness_owner_error_impl;

static void
witness_not_owner_error_impl(const witness_t *witness) {
    malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
        witness->rank);
    abort();
}
witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
    witness_not_owner_error_impl;

static void
witness_depth_error_impl(const witness_list_t *witnesses,
    witness_rank_t rank_inclusive, unsigned depth) {
    witness_t *w;

    malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
        (depth != 1) ? "s" : "", rank_inclusive);
    ql_foreach(w, witnesses, link) {
        malloc_printf(" %s(%u)", w->name, w->rank);
    }
    malloc_printf("\n");
    abort();
}
witness_depth_error_t *JET_MUTABLE witness_depth_error =
    witness_depth_error_impl;

void
witnesses_cleanup(witness_tsd_t *witness_tsd) {
    witness_assert_lockless(witness_tsd_tsdn(witness_tsd));

    /* Do nothing. */
}

void
witness_prefork(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
    witness_tsd->forking = true;
}

void
witness_postfork_parent(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
    witness_tsd->forking = false;
}

void
witness_postfork_child(witness_tsd_t *witness_tsd) {
    if (!config_debug) {
        return;
    }
#ifndef JEMALLOC_MUTEX_INIT_CB
    witness_list_t *witnesses;

    witnesses = &witness_tsd->witnesses;
    ql_new(witnesses);
#endif
    witness_tsd->forking = false;
}
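
/*
 * Illustrative sketch, not part of the vendored file: how a lock might wire
 * itself into the witness machinery above. The rank value and names are
 * invented; only witness_init() and the error hooks come from this file, and
 * the witness_assert_*() checks that consume the rank are assumed to live in
 * witness.h.
 */
static witness_t example_witness;

static void
example_witness_setup(void) {
    /* NULL comparator: lock ordering is decided by rank alone. */
    witness_init(&example_witness, "example_lock", 10, NULL, NULL);
}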
469
deps/jemalloc/src/zone.c
vendored
Normal file
469
deps/jemalloc/src/zone.c
vendored
Normal file
@@ -0,0 +1,469 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif

/* Definitions of the following structs in malloc/malloc.h might be too old
 * for the built binary to run on newer versions of OSX. So use the newest
 * possible version of those structs.
 */
typedef struct _malloc_zone_t {
    void *reserved1;
    void *reserved2;
    size_t (*size)(struct _malloc_zone_t *, const void *);
    void *(*malloc)(struct _malloc_zone_t *, size_t);
    void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
    void *(*valloc)(struct _malloc_zone_t *, size_t);
    void (*free)(struct _malloc_zone_t *, void *);
    void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
    void (*destroy)(struct _malloc_zone_t *);
    const char *zone_name;
    unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
    void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
    struct malloc_introspection_t *introspect;
    unsigned version;
    void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
    void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
    size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;

typedef struct {
    vm_address_t address;
    vm_size_t size;
} vm_range_t;

typedef struct malloc_statistics_t {
    unsigned blocks_in_use;
    size_t size_in_use;
    size_t max_size_in_use;
    size_t size_allocated;
} malloc_statistics_t;

typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);

typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);

typedef struct malloc_introspection_t {
    kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
    size_t (*good_size)(malloc_zone_t *, size_t);
    boolean_t (*check)(malloc_zone_t *);
    void (*print)(malloc_zone_t *, boolean_t);
    void (*log)(malloc_zone_t *, void *);
    void (*force_lock)(malloc_zone_t *);
    void (*force_unlock)(malloc_zone_t *);
    void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
    boolean_t (*zone_locked)(malloc_zone_t *);
    boolean_t (*enable_discharge_checking)(malloc_zone_t *);
    boolean_t (*disable_discharge_checking)(malloc_zone_t *);
    void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
    void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
    void *enumerate_unavailable_without_blocks;
#endif
    void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;

extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);

extern malloc_zone_t *malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t *zone);

extern void malloc_zone_unregister(malloc_zone_t *zone);

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

/******************************************************************************/
/* Data. */

static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
    void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
    malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
    /*
     * There appear to be places within Darwin (such as setenv(3)) that
     * cause calls to this function with pointers that *no* zone owns. If
     * we knew that all pointers were owned by *some* zone, we could split
     * our zone into two parts, and use one as the default allocator and
     * the other as the default deallocator/reallocator. Since that will
     * not work in practice, we must check all pointers to assure that they
     * reside within a mapped extent before determining size.
     */
    return ivsalloc(tsdn_fetch(), ptr);
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
    return je_malloc(size);
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
    return je_calloc(num, size);
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size) {
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, PAGE, size);

    return ret;
}

static void
zone_free(malloc_zone_t *zone, void *ptr) {
    if (ivsalloc(tsdn_fetch(), ptr) != 0) {
        je_free(ptr);
        return;
    }

    free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
    if (ivsalloc(tsdn_fetch(), ptr) != 0) {
        return je_realloc(ptr, size);
    }

    return realloc(ptr, size);
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, alignment, size);

    return ret;
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
    size_t alloc_size;

    alloc_size = ivsalloc(tsdn_fetch(), ptr);
    if (alloc_size != 0) {
        assert(alloc_size == size);
        je_free(ptr);
        return;
    }

    free(ptr);
}

static void
zone_destroy(malloc_zone_t *zone) {
    /* This function should never be called. */
    not_reached();
}

static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
    unsigned num_requested) {
    unsigned i;

    for (i = 0; i < num_requested; i++) {
        results[i] = je_malloc(size);
        if (!results[i])
            break;
    }

    return i;
}

static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
    unsigned num_to_be_freed) {
    unsigned i;

    for (i = 0; i < num_to_be_freed; i++) {
        zone_free(zone, to_be_freed[i]);
        to_be_freed[i] = NULL;
    }
}

static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
    return 0;
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size) {
    if (size == 0) {
        size = 1;
    }
    return sz_s2u(size);
}

static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder) {
    return KERN_SUCCESS;
}

static boolean_t
zone_check(malloc_zone_t *zone) {
    return true;
}

static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}

static void
zone_log(malloc_zone_t *zone, void *address) {
}

static void
zone_force_lock(malloc_zone_t *zone) {
    if (isthreaded) {
        /*
         * See the note in zone_force_unlock, below, to see why we need
         * this.
         */
        assert(zone_force_lock_pid == -1);
        zone_force_lock_pid = getpid();
        jemalloc_prefork();
    }
}

static void
zone_force_unlock(malloc_zone_t *zone) {
    /*
     * zone_force_lock and zone_force_unlock are the entry points to the
     * forking machinery on OS X. The tricky thing is, the child is not
     * allowed to unlock mutexes locked in the parent, even if owned by the
     * forking thread (and the mutex type we use in OS X will fail an assert
     * if we try). In the child, we can get away with reinitializing all
     * the mutexes, which has the effect of unlocking them. In the parent,
     * doing this would mean we wouldn't wake any waiters blocked on the
     * mutexes we unlock. So, we record the pid of the current thread in
     * zone_force_lock, and use that to detect if we're in the parent or
     * child here, to decide which unlock logic we need.
     */
    if (isthreaded) {
        assert(zone_force_lock_pid != -1);
        if (getpid() == zone_force_lock_pid) {
            jemalloc_postfork_parent();
        } else {
            jemalloc_postfork_child();
        }
        zone_force_lock_pid = -1;
    }
}

static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
    /* We make no effort to actually fill the values */
    stats->blocks_in_use = 0;
    stats->size_in_use = 0;
    stats->max_size_in_use = 0;
    stats->size_allocated = 0;
}

static boolean_t
zone_locked(malloc_zone_t *zone) {
    /* Pretend no lock is being held */
    return false;
}

static void
zone_reinit_lock(malloc_zone_t *zone) {
    /* As of OSX 10.12, this function is only used when force_unlock would
     * be used if the zone version were < 9. So just use force_unlock. */
    zone_force_unlock(zone);
}

static void
zone_init(void) {
    jemalloc_zone.size = zone_size;
    jemalloc_zone.malloc = zone_malloc;
    jemalloc_zone.calloc = zone_calloc;
    jemalloc_zone.valloc = zone_valloc;
    jemalloc_zone.free = zone_free;
    jemalloc_zone.realloc = zone_realloc;
    jemalloc_zone.destroy = zone_destroy;
    jemalloc_zone.zone_name = "jemalloc_zone";
    jemalloc_zone.batch_malloc = zone_batch_malloc;
    jemalloc_zone.batch_free = zone_batch_free;
    jemalloc_zone.introspect = &jemalloc_zone_introspect;
    jemalloc_zone.version = 9;
    jemalloc_zone.memalign = zone_memalign;
    jemalloc_zone.free_definite_size = zone_free_definite_size;
    jemalloc_zone.pressure_relief = zone_pressure_relief;

    jemalloc_zone_introspect.enumerator = zone_enumerator;
    jemalloc_zone_introspect.good_size = zone_good_size;
    jemalloc_zone_introspect.check = zone_check;
    jemalloc_zone_introspect.print = zone_print;
    jemalloc_zone_introspect.log = zone_log;
    jemalloc_zone_introspect.force_lock = zone_force_lock;
    jemalloc_zone_introspect.force_unlock = zone_force_unlock;
    jemalloc_zone_introspect.statistics = zone_statistics;
    jemalloc_zone_introspect.zone_locked = zone_locked;
    jemalloc_zone_introspect.enable_discharge_checking = NULL;
    jemalloc_zone_introspect.disable_discharge_checking = NULL;
    jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
    jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
    jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
    jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}

static malloc_zone_t *
zone_default_get(void) {
    malloc_zone_t **zones = NULL;
    unsigned int num_zones = 0;

    /*
     * On OSX 10.12, malloc_default_zone returns a special zone that is not
     * present in the list of registered zones. That zone uses a "lite zone"
     * if one is present (apparently enabled when malloc stack logging is
     * enabled), or the first registered zone otherwise. In practice this
     * means unless malloc stack logging is enabled, the first registered
     * zone is the default. So get the list of zones to get the first one,
     * instead of relying on malloc_default_zone.
     */
    if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
        (vm_address_t**)&zones, &num_zones)) {
        /*
         * Reset the value in case the failure happened after it was
         * set.
         */
        num_zones = 0;
    }

    if (num_zones) {
        return zones[0];
    }

    return malloc_default_zone();
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void) {
    malloc_zone_t *zone;

    do {
        /*
         * Unregister and reregister the default zone. On OSX >= 10.6,
         * unregistering takes the last registered zone and places it
         * at the location of the specified zone. Unregistering the
         * default zone thus makes the last registered one the default.
         * On OSX < 10.6, unregistering shifts all registered zones.
         * The first registered zone then becomes the default.
         */
        malloc_zone_unregister(default_zone);
        malloc_zone_register(default_zone);

        /*
         * On OSX 10.6, having the default purgeable zone appear before
         * the default zone makes some things crash because it thinks it
         * owns the default zone allocated pointers. We thus
         * unregister/re-register it in order to ensure it's always
         * after the default zone. On OSX < 10.6, there is no purgeable
         * zone, so this does nothing. On OSX >= 10.6, unregistering
         * replaces the purgeable zone with the last registered zone
         * above, i.e. the default zone. Registering it again then puts
         * it at the end, obviously after the default zone.
         */
        if (purgeable_zone != NULL) {
            malloc_zone_unregister(purgeable_zone);
            malloc_zone_register(purgeable_zone);
        }

        zone = zone_default_get();
    } while (zone != &jemalloc_zone);
}

JEMALLOC_ATTR(constructor)
void
zone_register(void) {
    /*
     * If something else replaced the system default zone allocator, don't
     * register jemalloc's.
     */
    default_zone = zone_default_get();
    if (!default_zone->zone_name || strcmp(default_zone->zone_name,
        "DefaultMallocZone") != 0) {
        return;
    }

    /*
     * The default purgeable zone is created lazily by OSX's libc. It uses
     * the default zone when it is created for "small" allocations
     * (< 15 KiB), but assumes the default zone is a scalable_zone. This
     * obviously fails when the default zone is the jemalloc zone, so
     * malloc_default_purgeable_zone() is called beforehand so that the
     * default purgeable zone is created when the default zone is still
     * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
     * to check for the existence of malloc_default_purgeable_zone() at
     * run time.
     */
    purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
        malloc_default_purgeable_zone();

    /* Register the custom zone. At this point it won't be the default. */
    zone_init();
    malloc_zone_register(&jemalloc_zone);

    /* Promote the custom zone to be default. */
    zone_promote();
}
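
/*
 * Illustrative sketch, not part of the vendored file: once the constructor
 * above has run, zone_promote() has looped until zone_default_get() returns
 * &jemalloc_zone, so a debug build could spot-check the promotion like this
 * (the helper name is invented):
 */
static bool
example_jemalloc_zone_is_default(void) {
    return zone_default_get() == &jemalloc_zone;
}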